diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 000000000..7958c0e64 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,37 @@ +name: Bug Report +description: File a bug report +title: "[Bug]: " +labels: ["bug"] +body: + - type: input + id: package-version + attributes: + label: Package + description: What version of the linode_api4 package are you using? + placeholder: 5.3.0 + validations: + required: true + + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: What should have happened? + + - type: textarea + id: actual + attributes: + label: Actual Behavior + description: What actually happened? + + - type: textarea + id: reproduce + attributes: + label: Steps to Reproduce + description: List any custom configurations and the steps to reproduce this error + + - type: textarea + id: error + attributes: + label: Error Output + description: If you received an error output that is too long, use Gists \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml new file mode 100644 index 000000000..e375d78a5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -0,0 +1,12 @@ +name: Enhancement +description: Request a feature +title: "[Feature]: " +labels: ["enhancement"] +body: + - type: textarea + id: description + attributes: + label: Description + description: What would you like this feature to do in detail? 
+ validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/help.yml b/.github/ISSUE_TEMPLATE/help.yml new file mode 100644 index 000000000..e822ee980 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/help.yml @@ -0,0 +1,12 @@ +name: Help +description: You're pretty sure it's not a bug but you can't figure out why it's not working +title: "[Help]: " +labels: ["help wanted"] +body: + - type: textarea + id: description + attributes: + label: Description + description: What are you attempting to do, what error messages are you getting? + validations: + required: true \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..226428122 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "pip" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/labels.yml b/.github/labels.yml new file mode 100644 index 000000000..83989042c --- /dev/null +++ b/.github/labels.yml @@ -0,0 +1,41 @@ +# PR Labels +- name: new-feature + description: for new features in the changelog. + color: 225fee +- name: project + description: for new projects in the changelog. + color: 46BAF0 +- name: improvement + description: for improvements in existing functionality in the changelog. + color: 22ee47 +- name: repo-ci-improvement + description: for improvements in the repository or CI workflow in the changelog. 
+ color: c922ee +- name: bugfix + description: for any bug fixes in the changelog. + color: ed8e21 +- name: documentation + description: for updates to the documentation in the changelog. + color: d3e1e6 +- name: dependencies + description: dependency updates usually from dependabot + color: 5c9dff +- name: testing + description: for updates to the testing suite in the changelog. + color: 933ac9 +- name: breaking-change + description: for breaking changes in the changelog. + color: ff0000 +- name: ignore-for-release + description: PRs you do not want to render in the changelog + color: 7b8eac +- name: do-not-merge + description: PRs that should not be merged until the commented issue is resolved + color: eb1515 +# Issue Labels +- name: enhancement + description: issues that request a enhancement + color: 22ee47 +- name: bug + description: issues that report a bug + color: ed8e21 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..d97f93452 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,9 @@ +## ๐Ÿ“ Description + +**What does this PR do and why is this change necessary?** + +## โœ”๏ธ How to Test + +**What are the steps to reproduce the issue or verify the changes?** + +**How do I run the relevant unit/integration tests?** diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 000000000..a2318fa64 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,35 @@ +changelog: + exclude: + labels: + - ignore-for-release + categories: + - title: ๐Ÿ“‹ New Project + labels: + - project + - title: โš ๏ธ Breaking Change + labels: + - breaking-change + - title: ๐Ÿ› Bug Fixes + labels: + - bugfix + - title: ๐Ÿš€ New Features + labels: + - new-feature + - title: ๐Ÿ’ก Improvements + labels: + - improvement + - title: ๐Ÿงช Testing Improvements + labels: + - testing + - title: โš™๏ธ Repo/CI Improvements + labels: + - repo-ci-improvement + - title: ๐Ÿ“– Documentation + labels: 
+ - documentation + - title: ๐Ÿ“ฆ Dependency Updates + labels: + - dependencies + - title: Other Changes + labels: + - "*" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..dd8eeea17 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,43 @@ + +name: Continuous Integration + +on: + push: + branches: + - dev + - main + pull_request: + workflow_dispatch: + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - name: checkout repo + uses: actions/checkout@v6 + + - name: setup python 3 + uses: actions/setup-python@v6 + with: + python-version: '3.x' + + - name: install dependencies + run: make dev-install + + - name: run linter + run: make lint + + build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-python@v6 + with: + python-version: ${{ matrix.python-version }} + - name: Run tests + run: | + pip install ".[test]" + tox diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..c7b208528 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,38 @@ +name: "CodeQL Advanced" + +on: + push: + branches: [ "dev", "main", "proj/*" ] + pull_request: + branches: [ "dev", "main", "proj/*" ] + schedule: + - cron: '39 0 * * 6' + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + runs-on: ubuntu-latest + permissions: + security-events: write + + strategy: + fail-fast: false + matrix: + include: + - language: python + build-mode: none + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + queries: security-and-quality + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 + with: + category: "/language:${{matrix.language}}" diff --git 
a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml new file mode 100644 index 000000000..ffba32062 --- /dev/null +++ b/.github/workflows/dependency-review.yml @@ -0,0 +1,18 @@ +name: 'Dependency review' +on: + pull_request: + branches: [ "dev", "main", "proj/*" ] +permissions: + contents: read + pull-requests: write + +jobs: + dependency-review: + runs-on: ubuntu-latest + steps: + - name: 'Checkout repository' + uses: actions/checkout@v6 + - name: 'Dependency Review' + uses: actions/dependency-review-action@v4 + with: + comment-summary-in-pr: on-failure diff --git a/.github/workflows/e2e-test-pr.yml b/.github/workflows/e2e-test-pr.yml new file mode 100644 index 000000000..f765b0a0d --- /dev/null +++ b/.github/workflows/e2e-test-pr.yml @@ -0,0 +1,209 @@ +on: + pull_request: + workflow_dispatch: + inputs: + run_db_fork_tests: + description: 'Set this parameter to "true" to run fork database related test cases' + required: false + default: 'false' + type: choice + options: + - 'true' + - 'false' + run_db_tests: + description: 'Set this parameter to "true" to run database related test cases' + required: false + default: 'false' + type: choice + options: + - 'true' + - 'false' + test_suite: + description: 'Enter specific test suite. E.g. domain, linode_client' + required: false + sha: + description: 'The hash value of the commit.' + required: true + pull_request_number: + description: 'The number of the PR.' + required: false + test_report_upload: + description: 'Indicates whether to upload the test report to object storage. 
Defaults to "false"' + required: false + default: 'false' + type: choice + options: + - 'true' + - 'false' + +name: PR E2E Tests + +jobs: + integration-fork-ubuntu: + runs-on: ubuntu-latest + if: + github.event_name == 'workflow_dispatch' && inputs.sha != '' + env: + EXIT_STATUS: 0 + + steps: + - uses: actions-ecosystem/action-regex-match@v2 + id: validate-tests + with: + text: ${{ inputs.test_suite }} + regex: '[^a-z0-9-:.\/_]' # Tests validation + flags: gi + + # Check out merge commit + - name: Checkout PR + uses: actions/checkout@v6 + with: + ref: ${{ inputs.sha }} + fetch-depth: 0 + submodules: 'recursive' + + - name: Get the hash value of the latest commit from the PR branch + uses: octokit/graphql-action@v2.x + id: commit-hash + if: ${{ inputs.pull_request_number != '' }} + with: + query: | + query PRHeadCommitHash($owner: String!, $repo: String!, $pr_num: Int!) { + repository(owner:$owner, name:$repo) { + pullRequest(number: $pr_num) { + headRef { + target { + ... on Commit { + oid + } + } + } + } + } + } + owner: ${{ github.event.repository.owner.login }} + repo: ${{ github.event.repository.name }} + pr_num: ${{ fromJSON(inputs.pull_request_number) }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Python + uses: actions/setup-python@v6 + with: + python-version: '3.x' + + - name: Install Python deps + run: pip install -U setuptools wheel boto3 certifi + + - name: Install Python SDK + run: make dev-install + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run Integration tests + run: | + timestamp=$(date +'%Y%m%d%H%M') + report_filename="${timestamp}_sdk_test_report.xml" + make test-int RUN_DB_FORK_TESTS=${{ github.event.inputs.run_db_fork_tests }} RUN_DB_TESTS=${{ github.event.inputs.run_db_tests }} TEST_ARGS="--junitxml=${report_filename}" TEST_SUITE="${{ github.event.inputs.test_suite }}" + env: + LINODE_TOKEN: ${{ secrets.LINODE_TOKEN }} + + - name: Upload test results + if: always() && github.repository == 
'linode/linode_api4-python' && (github.event_name == 'pull_request' || (github.event_name == 'workflow_dispatch' && inputs.test_report_upload == 'true')) + run: | + filename=$(ls | grep -E '^[0-9]{12}_sdk_test_report\.xml$') + python3 e2e_scripts/tod_scripts/xml_to_obj_storage/scripts/add_gha_info_to_xml.py \ + --branch_name "${GITHUB_REF#refs/*/}" \ + --gha_run_id "$GITHUB_RUN_ID" \ + --gha_run_number "$GITHUB_RUN_NUMBER" \ + --xmlfile "$filename" + sync + python3 e2e_scripts/tod_scripts/xml_to_obj_storage/scripts/xml_to_obj.py "$filename" + env: + LINODE_CLI_OBJ_ACCESS_KEY: ${{ secrets.LINODE_CLI_OBJ_ACCESS_KEY }} + LINODE_CLI_OBJ_SECRET_KEY: ${{ secrets.LINODE_CLI_OBJ_SECRET_KEY }} + + - uses: actions/github-script@v8 + id: update-check-run + if: ${{ inputs.pull_request_number != '' && fromJson(steps.commit-hash.outputs.data).repository.pullRequest.headRef.target.oid == inputs.sha }} + env: + number: ${{ inputs.pull_request_number }} + job: ${{ github.job }} + conclusion: ${{ job.status }} + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const { data: pull } = await github.rest.pulls.get({ + ...context.repo, + pull_number: process.env.number + }); + const ref = pull.head.sha; + const { data: checks } = await github.rest.checks.listForRef({ + ...context.repo, + ref + }); + const check = checks.check_runs.filter(c => c.name === process.env.job); + const { data: result } = await github.rest.checks.update({ + ...context.repo, + check_run_id: check[0].id, + status: 'completed', + conclusion: process.env.conclusion + }); + return result; + + apply-calico-rules: + runs-on: ubuntu-latest + needs: [integration-fork-ubuntu] + if: ${{ success() || failure() }} + + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + fetch-depth: 0 + submodules: 'recursive' + + - name: Download kubectl and calicoctl for LKE clusters + run: | + curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s
https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" + curl -LO "https://github.com/projectcalico/calico/releases/download/v3.25.0/calicoctl-linux-amd64" + chmod +x calicoctl-linux-amd64 kubectl + mv calicoctl-linux-amd64 /usr/local/bin/calicoctl + mv kubectl /usr/local/bin/kubectl + + - name: Apply Calico Rules to LKE + run: | + cd e2e_scripts/cloud_security_scripts/lke_calico_rules/ && ./lke_calico_rules_e2e.sh + env: + LINODE_TOKEN: ${{ secrets.LINODE_TOKEN }} + + add-fw-to-remaining-instances: + runs-on: ubuntu-latest + needs: [integration-fork-ubuntu] + if: ${{ success() || failure() }} + + steps: + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.x' + + - name: Install Linode CLI + run: | + pip install linode-cli + + - name: Create Firewall and Attach to Instances + run: | + FIREWALL_ID=$(linode-cli firewalls create --label "e2e-fw-$(date +%s)" --rules.inbound_policy "DROP" --rules.outbound_policy "ACCEPT" --text --format=id --no-headers) + echo "Created Firewall with ID: $FIREWALL_ID" + + for instance_id in $(linode-cli linodes list --format "id" --text --no-header); do + echo "Attaching firewall to instance: $instance_id" + if linode-cli firewalls device-create "$FIREWALL_ID" --id "$instance_id" --type linode; then + echo "Firewall attached to instance $instance_id successfully." + else + echo "An error occurred while attaching firewall to instance $instance_id. Skipping..." 
+ fi + done + env: + LINODE_CLI_TOKEN: ${{ secrets.LINODE_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml new file mode 100644 index 000000000..df1a41841 --- /dev/null +++ b/.github/workflows/e2e-test.yml @@ -0,0 +1,283 @@ +name: Integration Tests + +on: + workflow_dispatch: + inputs: + run_db_fork_tests: + description: 'Set this parameter to "true" to run fork database related test cases' + required: false + default: 'false' + type: choice + options: + - 'true' + - 'false' + run_db_tests: + description: 'Set this parameter to "true" to run database related test cases' + required: false + default: 'false' + type: choice + options: + - 'true' + - 'false' + test_suite: + description: 'Enter specific test suite. E.g. domain, linode_client' + required: false + use_minimal_test_account: + description: 'Indicate whether to use a minimal test account with limited resources for testing. Defaults to "false"' + required: false + default: 'false' + sha: + description: 'Specify commit hash to test. This value is mandatory to ensure the tests run against a specific commit' + required: true + default: '' + python-version: + description: 'Specify the Python version to use for running tests. Leave empty to use the default Python version configured in the environment' + required: false + run-eol-python-version: + description: 'Indicates whether to run tests using an End-of-Life (EOL) Python version. Defaults to "false". Choose "true" to include tests for deprecated Python versions' + required: false + default: 'false' + type: choice + options: + - 'true' + - 'false' + test_report_upload: + description: 'Indicates whether to upload the test report to object storage. 
Defaults to "false"' + type: choice + required: false + default: 'false' + options: + - 'true' + - 'false' + push: + branches: + - main + - dev + +env: + DEFAULT_PYTHON_VERSION: "3.10" + EOL_PYTHON_VERSION: "3.9" + EXIT_STATUS: 0 + +jobs: + integration-tests: + runs-on: ubuntu-latest + steps: + - name: Clone Repository with SHA + if: ${{ inputs.sha != '' }} + uses: actions/checkout@v6 + with: + fetch-depth: 0 + submodules: 'recursive' + ref: ${{ inputs.sha }} + + - name: Clone Repository without SHA + if: ${{ inputs.sha == '' }} + uses: actions/checkout@v6 + with: + fetch-depth: 0 + submodules: 'recursive' + + - name: Setup Python + uses: actions/setup-python@v6 + with: + python-version: ${{ inputs.run-eol-python-version == 'true' && env.EOL_PYTHON_VERSION || inputs.python-version || env.DEFAULT_PYTHON_VERSION }} + + - name: Install Python deps + run: pip install -U setuptools wheel boto3 certifi + + - name: Install Python SDK + run: make dev-install + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Set LINODE_TOKEN + run: | + echo "LINODE_TOKEN=${{ secrets[inputs.use_minimal_test_account == 'true' && 'MINIMAL_LINODE_TOKEN' || 'LINODE_TOKEN'] }}" >> $GITHUB_ENV + + - name: Run Integration tests + run: | + timestamp=$(date +'%Y%m%d%H%M') + report_filename="${timestamp}_sdk_test_report.xml" + make test-int RUN_DB_FORK_TESTS=${{ github.event.inputs.run_db_fork_tests }} RUN_DB_TESTS=${{ github.event.inputs.run_db_tests }} TEST_SUITE="${{ github.event.inputs.test_suite }}" TEST_ARGS="--junitxml=${report_filename}" + env: + LINODE_TOKEN: ${{ env.LINODE_TOKEN }} + + - name: Upload Test Report as Artifact + if: always() + uses: actions/upload-artifact@v6 + with: + name: test-report-file + if-no-files-found: ignore + path: '*.xml' + retention-days: 1 + + apply-calico-rules: + runs-on: ubuntu-latest + needs: [integration-tests] + if: ${{ success() || failure() }} + + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + fetch-depth: 0 + 
submodules: 'recursive' + + - name: Set LINODE_TOKEN + run: | + echo "LINODE_TOKEN=${{ secrets[inputs.use_minimal_test_account == 'true' && 'MINIMAL_LINODE_TOKEN' || 'LINODE_TOKEN'] }}" >> $GITHUB_ENV + + - name: Download kubectl and calicoctl for LKE clusters + run: | + curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" + curl -LO "https://github.com/projectcalico/calico/releases/download/v3.25.0/calicoctl-linux-amd64" + chmod +x calicoctl-linux-amd64 kubectl + mv calicoctl-linux-amd64 /usr/local/bin/calicoctl + mv kubectl /usr/local/bin/kubectl + + - name: Apply Calico Rules to LKE + run: | + cd e2e_scripts/cloud_security_scripts/lke_calico_rules/ && ./lke_calico_rules_e2e.sh + env: + LINODE_TOKEN: ${{ env.LINODE_TOKEN }} + + add-fw-to-remaining-instances: + runs-on: ubuntu-latest + needs: [integration-tests] + if: ${{ success() || failure() }} + + steps: + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.x' + + - name: Install Linode CLI + run: | + pip install linode-cli + + - name: Set LINODE_TOKEN + run: | + echo "LINODE_TOKEN=${{ secrets[inputs.use_minimal_test_account == 'true' && 'MINIMAL_LINODE_TOKEN' || 'LINODE_TOKEN'] }}" >> $GITHUB_ENV + + - name: Create Firewall and Attach to Instances + run: | + FIREWALL_ID=$(linode-cli firewalls create --label "e2e-fw-$(date +%s)" --rules.inbound_policy "DROP" --rules.outbound_policy "ACCEPT" --text --format=id --no-headers) + echo "Created Firewall with ID: $FIREWALL_ID" + + for instance_id in $(linode-cli linodes list --format "id" --text --no-header); do + echo "Attaching firewall to instance: $instance_id" + if linode-cli firewalls device-create "$FIREWALL_ID" --id "$instance_id" --type linode; then + echo "Firewall attached to instance $instance_id successfully." + else + echo "An error occurred while attaching firewall to instance $instance_id. Skipping..." 
+ fi + done + env: + LINODE_CLI_TOKEN: ${{ env.LINODE_TOKEN }} + + process-upload-report: + runs-on: ubuntu-latest + needs: [integration-tests] + # Run even if integration tests fail on main repository AND push event OR test_report_upload is true in case of manual run + if: always() && github.repository == 'linode/linode_api4-python' && (github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.test_report_upload == 'true')) + outputs: + summary: ${{ steps.set-test-summary.outputs.summary }} + + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + fetch-depth: 0 + submodules: 'recursive' + + - name: Download test report + uses: actions/download-artifact@v8 + with: + name: test-report-file + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.x' + + - name: Install Python dependencies + run: pip3 install requests wheel boto3==1.35.99 + + - name: Set release version env + run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + + - name: Add variables and upload test results + if: always() + run: | + filename=$(ls | grep -E '^[0-9]{12}_sdk_test_report\.xml$') + python3 e2e_scripts/tod_scripts/xml_to_obj_storage/scripts/add_gha_info_to_xml.py \ + --branch_name "${GITHUB_REF#refs/*/}" \ + --gha_run_id "$GITHUB_RUN_ID" \ + --gha_run_number "$GITHUB_RUN_NUMBER" \ + --xmlfile "$filename" + sync + python3 e2e_scripts/tod_scripts/xml_to_obj_storage/scripts/xml_to_obj.py "$filename" + env: + LINODE_CLI_OBJ_ACCESS_KEY: ${{ secrets.LINODE_CLI_OBJ_ACCESS_KEY }} + LINODE_CLI_OBJ_SECRET_KEY: ${{ secrets.LINODE_CLI_OBJ_SECRET_KEY }} + + - name: Generate test summary and save to output + id: set-test-summary + run: | + filename=$(ls | grep -E '^[0-9]{12}_sdk_test_report\.xml$') + test_output=$(python3 e2e_scripts/tod_scripts/generate_test_summary.py "$filename") + { + echo 'summary<<EOF' + echo "$test_output" + echo 'EOF' + } >> "$GITHUB_OUTPUT" + + notify-slack: + runs-on: ubuntu-latest + needs: [integration-tests, process-upload-report]
+ if: ${{ (success() || failure()) }} # Run even if integration tests fail and only on main repository + steps: + - name: Notify Slack + id: main_message + uses: slackapi/slack-github-action@v2.1.1 + with: + method: chat.postMessage + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + channel: ${{ secrets.SLACK_CHANNEL_ID }} + blocks: + - type: section + text: + type: mrkdwn + text: ":rocket: *${{ github.workflow }} Completed in: ${{ github.repository }}* ${{ needs.integration-tests.result == 'success' && ':white_check_mark:' || ':failed:' }}" + - type: divider + - type: section + fields: + - type: mrkdwn + text: "*Build Result:*\n${{ needs.integration-tests.result == 'success' && ':large_green_circle: Build Passed' || ':red_circle: Build Failed' }}" + - type: mrkdwn + text: "*Branch:*\n`${{ github.ref_name }}`" + - type: section + fields: + - type: mrkdwn + text: "*Commit Hash:*\n<${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}>" + - type: mrkdwn + text: "*Run URL:*\n<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|View Run Details>" + - type: divider + - type: context + elements: + - type: mrkdwn + text: "Triggered by: :bust_in_silhouette: `${{ github.actor }}`" + + - name: Test summary thread + if: success() + uses: slackapi/slack-github-action@v2.1.1 + with: + method: chat.postMessage + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + channel: ${{ secrets.SLACK_CHANNEL_ID }} + thread_ts: "${{ steps.main_message.outputs.ts }}" + text: "${{ needs.process-upload-report.outputs.summary }}" \ No newline at end of file diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 000000000..14e770b11 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,31 @@ +name: labeler + +on: + push: + branches: + - 'main' + paths: + - '.github/labels.yml' + - '.github/workflows/labeler.yml' + pull_request: + paths: + - '.github/labels.yml' + - 
'.github/workflows/labeler.yml' + +jobs: + labeler: + runs-on: ubuntu-latest + steps: + - + name: Checkout + uses: actions/checkout@v6 + - + name: Run Labeler + uses: crazy-max/ghaction-github-labeler@548a7c3603594ec17c819e1239f281a3b801ab4d + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + yaml-file: .github/labels.yml + dry-run: ${{ github.event_name == 'pull_request' }} + exclude: | + help* + *issue diff --git a/.github/workflows/nightly-smoke-tests.yml b/.github/workflows/nightly-smoke-tests.yml new file mode 100644 index 000000000..644ea9ce4 --- /dev/null +++ b/.github/workflows/nightly-smoke-tests.yml @@ -0,0 +1,77 @@ +name: Nightly Smoke Tests + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + inputs: + sha: + description: 'Commit SHA to test' + required: false + default: '' + type: string + + +jobs: + smoke_tests: + if: github.repository == 'linode/linode_api4-python' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + ref: dev + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.x' + + - name: Install Python deps + run: pip install -U setuptools wheel boto3 certifi + + - name: Install Python SDK + run: make dev-install + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run smoke tests + id: smoke_tests + run: | + make test-smoke + env: + LINODE_TOKEN: ${{ secrets.LINODE_TOKEN }} + + - name: Notify Slack + if: always() && github.repository == 'linode/linode_api4-python' + uses: slackapi/slack-github-action@v2.1.1 + with: + method: chat.postMessage + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + channel: ${{ secrets.SLACK_CHANNEL_ID }} + blocks: + - type: section + text: + type: mrkdwn + text: ":rocket: *${{ github.workflow }} Completed in: ${{ github.repository }}* :white_check_mark:" + - type: divider + - type: section + fields: + - type: mrkdwn + text: "*Build Result:*\n${{ 
steps.smoke_tests.outcome == 'success' && ':large_green_circle: Build Passed' || ':red_circle: Build Failed' }}" + - type: mrkdwn + text: "*Branch:*\n`${{ github.ref_name }}`" + - type: section + fields: + - type: mrkdwn + text: "*Commit Hash:*\n<${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}>" + - type: mrkdwn + text: "*Run URL:*\n<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|View Run Details>" + - type: divider + - type: context + elements: + - type: mrkdwn + text: "Triggered by: :bust_in_silhouette: `${{ github.actor }}`" + diff --git a/.github/workflows/publish-pypi.yaml b/.github/workflows/publish-pypi.yaml new file mode 100644 index 000000000..a791be4c9 --- /dev/null +++ b/.github/workflows/publish-pypi.yaml @@ -0,0 +1,31 @@ +name: release +on: + workflow_dispatch: null + release: + types: [ published ] +jobs: + pypi-release: + permissions: + # IMPORTANT: this permission is mandatory for trusted publishing + id-token: write + runs-on: ubuntu-latest + environment: pypi-release + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Setup Python + uses: actions/setup-python@v6 + with: + python-version: '3.x' + + - name: Install Python deps + run: pip install -U wheel build certifi + + - name: Build the package + run: make build + env: + LINODE_SDK_VERSION: ${{ github.event.release.tag_name }} + + - name: Publish the release artifacts to PyPI + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # pin@release/v1.13.0 diff --git a/.github/workflows/release-cross-repo-test.yml b/.github/workflows/release-cross-repo-test.yml new file mode 100644 index 000000000..69bf8031f --- /dev/null +++ b/.github/workflows/release-cross-repo-test.yml @@ -0,0 +1,66 @@ +name: Release Ansible cross repository test + +on: + pull_request: + branches: + - main + types: [opened] # Workflow will only be executed when PR is opened to main branch + workflow_dispatch: # 
Manual trigger + + +jobs: + ansible_integration_test: + runs-on: ubuntu-latest + steps: + - name: Checkout linode_api4 repository + uses: actions/checkout@v6 + with: + fetch-depth: 0 + submodules: 'recursive' + + - name: update packages + run: sudo apt-get update -y + + - name: install make + run: sudo apt-get install -y build-essential + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.10' + + - name: Checkout ansible repo + uses: actions/checkout@v6 + with: + repository: linode/ansible_linode + path: .ansible/collections/ansible_collections/linode/cloud + fetch-depth: 0 + submodules: 'recursive' + + - name: install dependencies + run: | + cd .ansible/collections/ansible_collections/linode/cloud + pip install -r requirements.txt -r requirements-dev.txt --upgrade-strategy only-if-needed + + - name: install ansible dependencies + run: ansible-galaxy collection install amazon.aws:==9.1.0 + + - name: install collection + run: | + cd .ansible/collections/ansible_collections/linode/cloud + make install + + - name: Install linode_api4 # Need to install from source after all ansible dependencies have been installed + run: make install + + - name: replace existing keys + run: | + cd .ansible/collections/ansible_collections/linode/cloud + rm -rf ~/.ansible/test && mkdir -p ~/.ansible/test && ssh-keygen -m PEM -q -t rsa -N '' -f ~/.ansible/test/id_rsa + + - name: Run Ansible Tests + run: | + cd .ansible/collections/ansible_collections/linode/cloud + make testall + env: + LINODE_API_TOKEN: ${{ secrets.LINODE_TOKEN }} diff --git a/.github/workflows/release-notify-slack.yml b/.github/workflows/release-notify-slack.yml new file mode 100644 index 000000000..4b01f094b --- /dev/null +++ b/.github/workflows/release-notify-slack.yml @@ -0,0 +1,24 @@ +name: Notify Dev DX Channel on Release +on: + release: + types: [published] + workflow_dispatch: null + +jobs: + notify: + if: github.repository == 'linode/linode_api4-python' + runs-on: ubuntu-latest 
+ steps: + - name: Notify Slack - Main Message + id: main_message + uses: slackapi/slack-github-action@v2.1.1 + with: + method: chat.postMessage + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + channel: ${{ secrets.DEV_DX_SLACK_CHANNEL_ID }} + blocks: + - type: section + text: + type: mrkdwn + text: "*New Release Published: _linode_api4-python_ <${{ github.event.release.html_url }}|${{ github.event.release.tag_name }}> is now live!* :tada:" \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0e3d032e4..7beded74d 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,13 @@ __pycache__ build dist *.egg-info +.eggs/* +docs/_build/* +.cache/* +.coverage +.pytest_cache/* +.tox/* +venv +baked_version +.vscode +.DS_Store diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..1a19a1c1a --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "e2e_scripts"] + path = e2e_scripts + url = https://github.com/linode/dx-e2e-test-scripts diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 000000000..49a156351 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,425 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. +jobs=1 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. 
+#rcfile= + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=consider-using-dict-items,blacklisted-name,invalid-name,missing-docstring,empty-docstring,unneeded-not,singleton-comparison,misplaced-comparison-constant,unidiomatic-typecheck,consider-using-enumerate,consider-iterating-dictionary,bad-classmethod-argument,bad-mcs-method-argument,bad-mcs-classmethod-argument,single-string-used-for-slots,line-too-long,too-many-lines,trailing-whitespace,missing-final-newline,trailing-newlines,multiple-statements,superfluous-parens,bad-whitespace,mixed-line-endings,unexpected-line-ending-format,bad-continuation,wrong-spelling-in-comment,wrong-spelling-in-docstring,invalid-characters-in-docstring,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,old-style-class,len-as-condition,fatal,astroid-error,parse-error,method-check-failed,raw-checker-failed,bad-inline-option,locally-disabled,locally-enabled,file-ignored,suppressed-message,useless-suppression,deprecated-pragma,literal-comparison,no-self-use,no-classmethod-decorator,no-
staticmethod-decorator,cyclic-import,duplicate-code,too-many-ancestors,too-many-instance-attributes,too-few-public-methods,too-many-public-methods,too-many-return-statements,too-many-branches,too-many-arguments,too-many-locals,too-many-statements,too-many-boolean-expressions,consider-merging-isinstance,too-many-nested-blocks,simplifiable-if-statement,redefined-argument-from-local,no-else-return,consider-using-ternary,trailing-comma-tuple,unreachable,dangerous-default-value,pointless-statement,pointless-string-statement,expression-not-assigned,unnecessary-pass,unnecessary-lambda,duplicate-key,deprecated-lambda,assign-to-new-keyword,useless-else-on-loop,exec-used,eval-used,confusing-with-statement,using-constant-test,lost-exception,assert-on-tuple,attribute-defined-outside-init,bad-staticmethod-argument,protected-access,arguments-differ,signature-differs,abstract-method,super-init-not-called,no-init,non-parent-init-called,useless-super-delegation,unnecessary-semicolon,bad-indentation,mixed-indentation,lowercase-l-suffix,wildcard-import,deprecated-module,relative-import,reimported,import-self,misplaced-future,fixme,invalid-encoded-data,global-variable-undefined,global-variable-not-assigned,global-statement,global-at-module-level,unused-import,unused-variable,unused-argument,unused-wildcard-import,redefined-outer-name,redefined-builtin,redefine-in-handler,undefined-loop-variable,cell-var-from-loop,bare-except,broad-except,duplicate-except,nonstandard-exception,binary-op-exception,property-on-old-class,logging-not-lazy,logging-format-interpolation,bad-format-string-key,unused-format-string-key,bad-format-string,missing-format-argument-key,unused-format-string-argument,format-combined-specification,missing-format-attribute,invalid-format-index,anomalous-backslash-in-string,anomalous-unicode-escape-in-string,bad-open-mode,boolean-datetime,redundant-unittest-assert,deprecated-method,apply-builtin,basestring-builtin,buffer-builtin,cmp-builtin,coerce-builtin,execfile-builtin,
file-builtin,long-builtin,raw_input-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,no-absolute-import,old-division,dict-iter-method,dict-view-method,next-method-called,metaclass-assignment,indexing-exception,raising-string,reload-builtin,oct-method,hex-method,nonzero-method,cmp-method,input-builtin,round-builtin,intern-builtin,unichr-builtin,map-builtin-not-iterating,zip-builtin-not-iterating,range-builtin-not-iterating,filter-builtin-not-iterating,using-cmp-argument,eq-without-hash,div-method,idiv-method,rdiv-method,exception-message-attribute,invalid-str-codec,sys-max-int,bad-python3-import,deprecated-string-function,deprecated-str-translate-call,useless-object-inheritance,comparison-with-callable,bad-option-value,consider-using-f-string,unspecified-encoding,missing-timeout,unnecessary-dunder-call,no-value-for-parameter,c-extension-no-member,attribute-defined-outside-init,use-a-generator,too-many-positional-arguments + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. 
+enable=syntax-error,unrecognized-inline-option,init-is-generator,return-in-init,function-redefined,not-in-loop,return-outside-function,yield-outside-function,return-arg-in-generator,nonexistent-operator,duplicate-argument-name,abstract-class-instantiated,bad-reversed-sequence,too-many-star-expressions,invalid-star-assignment-target,star-needs-assignment-target,nonlocal-and-global,continue-in-finally,nonlocal-without-binding,used-prior-global-declaration,method-hidden,access-member-before-definition,no-method-argument,no-self-argument,invalid-slots-object,assigning-non-slot,invalid-slots,inherit-non-class,inconsistent-mro,duplicate-bases,non-iterator-returned,unexpected-special-method-signature,invalid-length-returned,import-error,relative-beyond-top-level,used-before-assignment,undefined-variable,undefined-all-variable,invalid-all-object,no-name-in-module,unbalanced-tuple-unpacking,unpacking-non-sequence,bad-except-order,raising-bad-type,bad-exception-context,misplaced-bare-raise,raising-non-exception,notimplemented-raised,catching-non-exception,slots-on-old-class,super-on-old-class,bad-super-call,missing-super-argument,no-member,not-callable,assignment-from-no-return,too-many-function-args,unexpected-keyword-arg,redundant-keyword-arg,missing-kwoa,invalid-sequence-index,invalid-slice-index,assignment-from-none,not-context-manager,invalid-unary-operand-type,unsupported-binary-operation,repeated-keyword,not-an-iterable,not-a-mapping,unsupported-membership-test,unsubscriptable-object,unsupported-assignment-operation,unsupported-delete-operation,invalid-metaclass,logging-unsupported-format,logging-format-truncated,logging-too-many-args,logging-too-few-args,bad-format-character,truncated-format-string,mixed-format-string,format-needs-mapping,missing-format-string-key,too-many-format-args,too-few-format-args,bad-str-strip-call,print-statement,parameter-unpacking,unpacking-in-except,old-raise-syntax,backtick,long-suffix,old-ne-operator,old-octal-literal,import-star-module
-level,yield-inside-async-function,not-async-context-manager,unused-variable,bad-indentation + + +[REPORTS] + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio).You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. 
+notes=FIXME,XXX,TODO + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. 
+missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,future.builtins + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module +max-module-lines=1000 + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). 
+# `empty-line` allows space-only lines. +no-space-check=trailing-comma,dict-separator + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[BASIC] + +# Naming hint for argument names +argument-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Regular expression matching correct argument names +argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Naming hint for attribute names +attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Regular expression matching correct attribute names +attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. 
+docstring-min-length=-1 + +# Naming hint for function names +function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Regular expression matching correct function names +function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for method names +method-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Regular expression matching correct method names +method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty + +# Naming hint for variable names +variable-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + +# Regular expression matching correct variable names +variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ + + +[IMPORTS] + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. 
This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). 
+max-public-methods=20 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of statements in function / method body +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/.python-version b/.python-version new file mode 100644 index 000000000..6905745d0 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +linode_api4-python diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..3fad08aad --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,16 @@ +# Read the Docs configuration file for Sphinx projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 +build: + os: ubuntu-lts-latest + tools: + python: latest +sphinx: + configuration: docs/conf.py +python: + install: + - method: pip + path: . + extra_requirements: + - doc \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..e023b0d14 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @linode/dx @linode/dx-sdets diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..0a5403963 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,49 @@ +# Contributing Guidelines + +:+1::tada: First off, we appreciate you taking the time to contribute! THANK YOU! :tada::+1: + +We put together the handy guide below to help you get support for your work. Read on! + +## I Just Want to Ask the Maintainers a Question + +The [Linode Community](https://www.linode.com/community/questions/) is a great place to get additional support. + +## How Do I Submit A (Good) Bug Report or Feature Request + +Please open a [GitHub issue](../../issues/new/choose) to report bugs or suggest features. + +Please accurately fill out the appropriate GitHub issue form. 
+ +When filing an issue or feature request, help us avoid duplication and redundant effort -- check existing open or recently closed issues first. + +Detailed bug reports and requests are easier for us to work with. Please include the following in your issue: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made, relevant to the bug +* Anything unusual about your environment or deployment +* Screenshots and code samples where illustrative and helpful + +## How to Open a Pull Request + +We follow the [fork and pull model](https://opensource.guide/how-to-contribute/#opening-a-pull-request) for open source contributions. + +Tips for a faster merge: +* address one feature or bug per pull request. +* large formatting changes make it hard for us to focus on your work. +* follow language coding conventions. +* make sure that tests pass. +* make sure your commits are atomic, [addressing one change per commit](https://chris.beams.io/posts/git-commit/). +* add tests! + +## Code of Conduct + +This project follows the [Linode Community Code of Conduct](https://www.linode.com/community/questions/conduct). + +## Vulnerability Reporting + +If you discover a potential security issue in this project we ask that you notify Linode Security via our [vulnerability reporting process](https://hackerone.com/linode). Please do **not** create a public github issue. + +## Licensing + +See the [LICENSE file](/LICENSE) for our project's licensing. 
\ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..d15ca4b00 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +# Include all files under test/ directory in source distribution only +graft test + +# Exclude Python bytecode +global-exclude *.pyc +global-exclude __pycache__ diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..ce7ef77d0 --- /dev/null +++ b/Makefile @@ -0,0 +1,73 @@ +PYTHON ?= python3 + +LINODE_SDK_VERSION ?= "0.0.0.dev" +VERSION_MODULE_DOCSTRING ?= \"\"\"\nThe version of this linode_api4 package.\n\"\"\"\n\n +VERSION_FILE := ./linode_api4/version.py + +.PHONY: clean +clean: + mkdir -p dist + rm -r dist + rm -f baked_version + +.PHONY: build +build: clean create-version + $(PYTHON) -m build --wheel --sdist + +.PHONY: create-version +create-version: + @printf "${VERSION_MODULE_DOCSTRING}__version__ = \"${LINODE_SDK_VERSION}\"\n" > $(VERSION_FILE) + +.PHONY: release +release: build + $(PYTHON) -m twine upload dist/* + +.PHONY: dev-install +dev-install: clean + $(PYTHON) -m pip install -e ".[dev]" + +.PHONY: install +install: clean create-version + $(PYTHON) -m pip install . + +.PHONY: black +black: + $(PYTHON) -m black linode_api4 test + +.PHONY: isort +isort: + $(PYTHON) -m isort linode_api4 test + +.PHONY: autoflake +autoflake: + $(PYTHON) -m autoflake linode_api4 test + +.PHONY: format +format: black isort autoflake + +.PHONY: lint +lint: build + $(PYTHON) -m isort --check-only linode_api4 test + $(PYTHON) -m autoflake --check linode_api4 test + $(PYTHON) -m black --check --verbose linode_api4 test + $(PYTHON) -m pylint linode_api4 + $(PYTHON) -m twine check dist/* + +# Integration Test Arguments +# TEST_SUITE: Optional, specify a test suite (e.g. domain), Default to run everything if not set +# TEST_CASE: Optional, specify a test case (e.g. 'test_image_replication') +# TEST_ARGS: Optional, additional arguments for pytest (e.g. 
'-v' for verbose mode) + +TEST_COMMAND = $(if $(TEST_SUITE),$(if $(filter $(TEST_SUITE),linode_client login_client filters),$(TEST_SUITE),models/$(TEST_SUITE))) + +.PHONY: test-int +test-int: + $(PYTHON) -m pytest test/integration/${TEST_COMMAND} $(if $(TEST_CASE),-k $(TEST_CASE)) ${TEST_ARGS} + +.PHONY: test-unit +test-unit: + $(PYTHON) -m pytest test/unit + +.PHONY: test-smoke +test-smoke: + $(PYTHON) -m pytest -m smoke test/integration \ No newline at end of file diff --git a/README.rst b/README.rst index f224da136..5615bb488 100644 --- a/README.rst +++ b/README.rst @@ -1,13 +1,25 @@ -python-linode-api -================= +linode_api4 +=========== The official python library for the `Linode API v4`_ in python. +.. _Linode API v4: https://techdocs.akamai.com/linode-api/reference/api + +.. image:: https://img.shields.io/github/actions/workflow/status/linode/linode_api4-python/main.yml?label=tests + :target: https://img.shields.io/github/actions/workflow/status/linode/linode_api4-python/main.yml?label=tests + +.. image:: https://badge.fury.io/py/linode-api4.svg + :target: https://badge.fury.io/py/linode-api4 + +.. image:: https://readthedocs.org/projects/linode-api4/badge/?version=latest + :target: https://linode-api4.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + Installation ------------ :: - pip install linode-api + pip install linode_api4 Building from Source -------------------- @@ -15,16 +27,65 @@ Building from Source To build and install this package: - Clone this repository -- ``./setup.py install`` - -This package uses the ``linode`` namespace. This could conflict with libraries -intended for previous versions of the Linode API. Please be cautious when -installing packages for both version of the API on the same machine. 
+- ``python3 -m pip install .`` Usage ------ +===== + +Quick Start +----------- + +In order to authenticate with the Linode API, you will first need to create a +`Linode Personal Access Token`_ with your desired account permissions. + +The following code sample can help you quickly get started using this package. + +.. code-block:: python + + from linode_api4 import LinodeClient, Instance + + # Create a Linode API client + client = LinodeClient("MY_PERSONAL_ACCESS_TOKEN") + + # Create a new Linode + new_linode, root_pass = client.linode.instance_create( + ltype="g6-nanode-1", + region="us-southeast", + image="linode/ubuntu22.04", + label="my-ubuntu-linode" + ) + + # Print info about the Linode + print("Linode IP:", new_linode.ipv4[0]) + print("Linode Root Password:", root_pass) -Read our `Python Reference`_ for extensive documentation on this library. + # List all Linodes on the account + my_linodes = client.linode.instances() + + # Print the Label of every Linode on the account + print("All Instances:") + for instance in my_linodes: + print(instance.label) + + # List Linodes in the us-southeast region + specific_linodes = client.linode.instances( + Instance.region == "us-southeast" + ) + + # Print the label of each Linode in us-southeast + print("Instances in us-southeast:") + for instance in specific_linodes: + print(instance.label) + + # Delete the new instance + new_linode.delete() + +Check out the `Getting Started guide`_ for more details on getting started +with this library, or read `the docs`_ for more extensive documentation. + +.. _Linode Personal Access Token: https://www.linode.com/docs/products/tools/api/guides/manage-api-tokens/ +.. _Getting Started guide: https://linode-api4.readthedocs.io/en/latest/guides/getting_started.html +.. 
_the docs: https://linode-api4.readthedocs.io/en/latest/index.html Examples -------- @@ -32,6 +93,100 @@ Examples See the `Install on a Linode`_ example project for a simple use case demonstrating many of the features of this library. -.. _Linode API v4: https://developers.linode.com/v4/introduction -.. _Install on a Linode: https://github.com/linode/python-api/tree/master/examples/install-on-linode -.. _Python Reference: https://developers.linode.com/v4/guides/python +.. _Install on a Linode: https://github.com/linode/linode_api4-python/tree/master/examples/install-on-linode + +Contributing +============ + +Tests +----- + +Tests live in the ``test`` directory. When invoking tests, make sure you are +in the root directory of this project. To run the full suite across all +supported python versions, use tox_: + +.. code-block:: shell + + tox + +Running tox also runs pylint and coverage reports. + +The test suite uses fixtures stored as JSON in ``test/fixtures``. These files +contain sanitized JSON responses from the API - the file name is the URL called +to produce the response, replacing any slashes with underscores. + +Test classes should extend ``test.base.ClientBaseCase``. This provides them +with ``self.client``, a ``LinodeClient`` object that is set up to work with +tests. Importantly, any GET request made by this object will be mocked to +retrieve data from the test fixtures. This includes lazy-loaded objects using +this client (and by extension related models). + +When testing against requests other than GET requests, ``self.mock_post`` (and +equivalent methods for other HTTP verbs) can be used in a ``with`` block to +mock out the intended request type. 
These functions accept the relative path
+from the api base url that should be returned, for example::
+
+    # this should return the result of GET /linode/instances/123
+    with self.mock_post('/linode/instances/123'):
+        linode = self.client.linode.instance_create('g6-standard-2', 'us-east')
+        self.assertEqual(linode.id, 123) # passes
+
+.. _tox: http://tox.readthedocs.io
+
+
+Integration Tests
+-----------------
+Integration tests live in the ``test/integration`` directory.
+
+Pre-requisite
+^^^^^^^^^^^^^^^^^
+Export Linode API token as `LINODE_TOKEN` before running integration tests::
+
+    export LINODE_TOKEN=$(your_token)
+
+Running the tests
+^^^^^^^^^^^^^^^^^
+Run the tests locally using the make command. Run the entire test suite using command below::
+
+    make test-int
+
+To run a specific package/suite, use the environment variable `TEST_SUITE` using directory names in `integration/...` folder ::
+
+    make TEST_SUITE="account" test-int # Runs tests in `integration/models/account` directory
+    make TEST_SUITE="linode_client" test-int # Runs tests in `integration/linode_client` directory
+
+Lastly to run a specific test case use environment variable `TEST_CASE` with `test-int` command::
+
+    make TEST_CASE=test_get_domain_record test-int
+
+Documentation
+-------------
+
+This library is documented with Sphinx_. Docs live in the ``docs`` directory.
+The easiest way to build the docs is to run ``sphinx-autobuild`` in that
+folder::
+
+    sphinx-autobuild docs docs/build
+
+After running this command, ``sphinx-autobuild`` will host a local web server
+with the rendered documentation.
+
+Classes and functions inside the library should be annotated with sphinx-compliant
+docstrings which will be used to automatically generate documentation for the
+library. When contributing, be sure to update documentation or include new
+docstrings where applicable to keep the library's documentation up to date
+and useful.
+
+**Missing or inaccurate documentation is a bug**. 
If you notice that the +documentation for this library is out of date or unclear, please +`open an issue`_ to make us aware of the problem. + +.. _Sphinx: http://www.sphinx-doc.org/en/master/index.html +.. _open an issue: https://github.com/linode/linode_api4-python/issues/new + +Contributing +------------ + +Please follow the `Contributing Guidelines`_ when making a contribution. + +.. _Contributing Guidelines: https://github.com/linode/linode_api4-python/blob/master/CONTRIBUTING.md diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..7ce7e8842 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = linode_api4 +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..ee6609943 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,172 @@ +# -* coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/stable/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use Path(...).absolute() to make it absolute, like shown here. 
+# +import sys +from pathlib import Path +sys.path.insert(0, str(Path('..').absolute())) + + +# -- Project information ----------------------------------------------------- + +project = 'linode_api4' +copyright = '2024, Akamai Technologies Inc.' +author = 'Linode' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '3.0.0' + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', + 'sphinxcontrib.fulltoc', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path . +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. 
See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +html_theme_options = { + 'sidebar_width': '320px', + 'page_width': '1000px', +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'linode_api4doc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'linode_api4.tex', 'linode_api4 Documentation', + 'Linode', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. 
List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'linode_api4', 'linode_api4 Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'linode_api', 'linode_api4 Documentation', + author, 'linode_api4', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True diff --git a/docs/guides/core_concepts.rst b/docs/guides/core_concepts.rst new file mode 100644 index 000000000..7299c45db --- /dev/null +++ b/docs/guides/core_concepts.rst @@ -0,0 +1,183 @@ +Core Concepts +============= + +.. module:: linode + +The linode_api4 package, and the API V4, have a few ideas that will help you more +quickly become proficient with their usage. This page assumes you've read the +`Getting Started `_ guide, and know the basics of +authentication already. + +Pagination +---------- + +The Linode API V4 loosely follows a RESTful design, and paginates results to +responses for GETs to collections. This library handles pagination +transparently, and does not load pages of data until they are required. This +is handled by the :any:`PaginatedList` class, which +behaves similarly to a python list. 
For example:: + + linodes = client.linode.instances() # returns a PaginatedList of linodes + + first_linode = linodes[0] # the first page is loaded automatically, this does + # not emit an API call + + # you can also use the `first()` convenience function for this + first_linode = linodes.first() + + last_linode = linodes[-1] # loads only the last page, if it hasn't been loaded yet + # this _will_ emit an API call if there were two or + # more pages of results. If there was only one page, + # this does not emit an additional call + + for current_linode in linodes: # iterate over all results, loading pages as necessary + print(current_linode.label) + +If you're not concerned about performance, using a +:any:`PaginatedList` as a normal list should be fine. If +your application is sensitive to performance concerns, be aware that iterating +over a :any:`PaginatedList` can cause the thread to wait as a synchronous +request for additional data is made mid-iteration. + +Filtering +--------- + +Collections of objects in the API can be filtered to make their results more +useful. For example, instead of having to do this filtering yourself on the +full list, you can ask the API for all Linode Instances you own belonging to a +certain group. This library implements filtering with a SQLAlchemy-like +syntax, where a model's attributes may be used in comparisons to generate +filters. 
For example:: + + from linode_api4 import Instance + + prod_linodes = client.linode.instances(Instance.group == "production") + +Filters may be combined using boolean operators similar to SQLAlchemy:: + + # and_ and or_ can be imported from the linode package to combine filters + prod_or_staging = client.linode.instances(or_(Instance.group == "production", + Instance.group == "staging")) + + # and_ isn't strictly necessary, as it's the default when passing multiple + # filters to a collection + prod_and_green = client.linode.instances(Instance.group == "production", + Instance.label.contains("green")) + +Filters are generally only applicable for the type of model you are querying, +but can be combined to your heart's content. For numeric fields, the standard +numeric comparisons are accepted, and work as you'd expect. See +:doc:`Filtering Collections` for full details. + +Models +------ + +This library represents objects the API returns as "models." Most methods of +:any:`LinodeClient` return models or lists of models, and all models behave +in a similar manner. + +Creating Models +^^^^^^^^^^^^^^^ + +In addition to looking up models from collections, you can simply import the +model class and create it by ID.:: + + from linode_api4 import Instance + + my_linode = Instance(client, 123) + +All models take a `LinodeClient` as their first parameter, and their ID as the +second. For derived models (models that belong to another model), the parent +model's ID is taken as a third argument to the constructor (i.e. to construct +a :any:`Disk` you pass a :any:`LinodeClient`, the disk's ID, then the parent +Linode Instance's ID). + +Be aware that when creating a model this way, it is _not_ loaded from the API +immediately. Models in this library are **lazy-loaded**, and will not be looked +up until one of their attributes that is currently unknown is accessed. 
+ +In order to automatically populate a model for an existing Linode resource, +consider using the :any:`LinodeClient.load` method:: + + from linode_api4 import Instance, Disk + + instance = client.load(Instance, 12345) + instance_disk = client.load(Disk, 123, instance.id) + +Lazy Loading +^^^^^^^^^^^^ + +If a model is created, but not yet retrieved from the API, its attributes will be +unpopulated. As soon as an unpopulated attribute is accessed, an API call is +emitted to retrieve that value (and the rest of the attributes in the model) from +the API. For example:: + + my_linode.id # no API call emitted - this was set on creation + my_linode.label # API call emitted - entire object is loaded from response + my_linode.group # no API call emitted - this was loaded above + +.. note:: + + When loading a model in this fashion, if the model does not exist in the API + or you do not have access to it, an ApiError is raised. If you want to load + a model in a more predictable manner, see :any:`LinodeClient.load` + +Volatile Attributes +^^^^^^^^^^^^^^^^^^^ + +Some attributes of models are marked **volatile**. A **volatile** attribute will +become stale after a short time, and if accessed when its value is stale, will +refresh itself (and the entire object) from the API to ensure the value is +current.:: + + my_linode.boot() + my_linode.status # booting + time.sleep(20) # wait for my_linode.status to become stale + my_linode.status # running + + +.. note:: + + While it is often safe to loop on a **volatile** attribute, be aware that there is + no guarantee that their value will ever change - be sure that any such loops + have another exit condition to prevent your application from hanging if something + you didn't expect happens. + +Updating and Deleting Models +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Most models have some number of mutable attributes. Updating a model is as simple +as assigning a new value to these attributes and then saving the model. 
Many +models can also be deleted in a similar fashion.:: + + my_linode.label = "new-label" + my_linode.group = "new-group" + my_linode.save() # emits an API call to update label and group + + my_linode.delete() # emits an API call to delete my_linode + +.. note:: + + Saving a model *may* fail if the values you are attempting to save are invalid. + If the values you are attempting to save are coming from an untrusted source, + be sure to handle a potential :any:`ApiError` raised by the API returning + an unsuccessful response code. + + When updating an attribute on a model, ensure that the model has been populated + *before* any local changes have been made. Attempting to update an attribute + and save a model before the model has been populated will result in no changes + being applied. + +Relationships +^^^^^^^^^^^^^ + +Many models are related to other models (for example a Linode Instance has +disks, configs, volumes, backups, a region, etc). Related attributes are +accessed like any other attribute on the model, and will emit an API call to +retrieve the related models if necessary.:: + + len(my_linode.disks) # emits an API call to retrieve related disks + my_linode.disks[0] # no API call emitted - this is already loaded + + my_linode.region.id # no API call emitted - IDs are already populated + my_linode.region.country # API call emitted - retrieves region object diff --git a/docs/guides/event_polling.rst b/docs/guides/event_polling.rst new file mode 100644 index 000000000..b9a782f3c --- /dev/null +++ b/docs/guides/event_polling.rst @@ -0,0 +1,104 @@ +Polling for Events +================== + +There are often situations where an API request will trigger a +long-running operation (e.g. Instance shutdown) that will run +after the request has been made. These operations are tracked +through `Linode Account Events`_ which reflect the target entity, +progress, and status of these operations. + +.. 
_Linode Account Events: https://www.linode.com/docs/api/account/#events-list + +There are often cases where you would like for your application to +halt until these operations have succeeded. The most reliable and +efficient way to achieve this is by using the :py:class:`EventPoller` +object. + +Polling on Basic Operations +--------------------------- + +In order to poll for an operation, we must create an :py:class:`EventPoller` +object *before* the endpoint that triggers the operation has been called. + +Assuming a :py:class:`LinodeClient` object has already been created with the name +"client" and an :py:class:`Instance` object has already been created with the name "my_instance", +an :py:class:`EventPoller` can be created using the +:meth:`LinodeClient.polling.event_poller_create(...) ` +method:: + + poller = client.polling.event_poller_create( + "linode", # The type of the target entity + "linode_shutdown", # The action to poll for + entity_id=my_instance.id, # The ID of your Linode Instance + ) + +Valid values for the `type` and `action` fields can be found in the `Events Response Documentation`_. + +.. _Events Response Documentation: https://www.linode.com/docs/api/account/#events-list__responses + +From here, we can send the request to trigger the long-running operation:: + + my_instance.shutdown() + +To wait for this operation to finish, we can call the +:meth:`poller.wait_for_next_event_finished(...) ` +method:: + + poller.wait_for_next_event_finished() + +The :py:class:`timeout` (default 240) and :py:class:`interval` (default 5) arguments can optionally be used to configure the timeout +and poll frequency for this operation. 
+ +Bringing this together, we get the following:: + + from linode_api4 import LinodeClient, Instance + + # Construct a client + client = LinodeClient("MY_LINODE_TOKEN") + + # Fetch an existing Linode Instance + my_instance = client.load(Instance, 12345) + + # Create the event poller + poller = client.polling.event_poller_create( + "linode", # The type of the target entity + "linode_shutdown", # The action to poll for + entity_id=my_instance.id, # The ID of your Linode Instance + ) + + # Shutdown the Instance + my_instance.shutdown() + + # Wait until the event has finished + poller.wait_for_next_event_finished() + + print("Linode has been successfully shutdown!") + +Polling for an Entity to be Free +-------------------------------- + +In many cases, certain operations cannot be run until any other operations running on a resource have +been completed. To ensure these operation are run reliably and do not encounter conflicts, +you can use the +:meth:`LinodeClient.polling.wait_for_entity_free(...) ` method +to wait until a resource has no running or queued operations. + +For example:: + + # Construct a client + client = LinodeClient("MY_LINODE_TOKEN") + + # Load an existing instance + my_instance = client.load(Instance, 12345) + + # Wait until the Linode is not busy + client.polling.wait_for_entity_free( + "linode", + my_instance.id + ) + + # Boot the Instance + my_instance.boot() + +The :py:class:`timeout` (default 240) and :py:class:`interval` (default 5) arguments can optionally be used to configure the timeout +and poll frequency for this operation. diff --git a/docs/guides/getting_started.rst b/docs/guides/getting_started.rst new file mode 100644 index 000000000..01b2a6d6c --- /dev/null +++ b/docs/guides/getting_started.rst @@ -0,0 +1,100 @@ +Getting Started +=============== + +Installation +------------ + +The linode_api4 package can be installed from pypi as shown below: + +.. 
code-block:: shell + + pip install linode_api4 + +If you prefer, you can clone the package from github_ and install it from source: + +.. _github: https://github.com/Linode/linode_api4-python + +.. code-block:: shell + + git clone git@github.com:Linode/linode_api4-python + cd linode_api4 + python -m pip install . + +Authentication +-------------- + +In order to make requests to the Linode API, you will need a token. To generate +one, log in to cloud.linode.com_, and on your profile_ click "Create a Personal +Access Token". + +.. _cloud.linode.com: https://cloud.linode.com +.. _profile: https://cloud.linode.com/profile/tokens + +.. note:: + You can also use an OAuth Token to authenticate to the API - see :doc:`OAuth` + for details. + +When creating a Personal Access Token, you will be prompted for what scopes the +token should be created with. These scopes control what parts of your account +this token may be used to access - for more information, see :ref:`OAuth Scopes`. +Restricting what a token can access is more secure than creating one with access +to your entire account, but can be less convenient since you would need to create +a new token to access other parts of the account. For the examples on this page, +your Personal Access Token must be able to view and create Linode Instances. + +Listing your Linode Instances +----------------------------- + +Using the token you generated above, create a :py:class:`LinodeClient` object +that will be used for all interactions with the API.:: + + from linode_api4 import LinodeClient + client = LinodeClient(token) + +This object will manage all requests you make through the API. Once it's +set up, you can use it to retrieve and print a list of your Linode Instances:: + + my_linodes = client.linode.instances() + + for current_linode in my_linodes: + print(current_linode.label) + +When retrieving collections of objects from the API, a list-like object is +returned, and may be iterated over or indexed as a normal list. 
+ +Creating a Linode Instance +-------------------------- + +In order to create a Linode Instance, we need a few pieces of information: + + * what :any:`Region` to create the Instance in. + * what :any:`Type` of Instance to create. + * what :any:`Image` to deploy to the new Instance. + +We can query for these values similarly to how we listed our Linode Instances +above:: + + available_regions = client.regions() + +We could also use values that we know in advance to avoid the need to query the +API. For example, we may know that we want a `g6-standard-4` Instance running +the `linode/debian9` Image. Both objects and IDs are accepted when creating an +Instance.:: + + chosen_region = available_regions[0] + + + new_linode, password = client.linode.instance_create('g6-standard-4', + chosen_region, + image='linode/debian9') + +:py:func:`instance_create` returns the newly-created Instance object and the +root password that was generated for it. This Instance will boot automatically, +and should be available shortly. Finally, let's print out the results so we +can access our new server. + +.. code-block:: python + + print("ssh root@{} - {}".format(new_linode.ipv4[0], password)) + +Continue on to `Core Concepts `_ diff --git a/docs/guides/oauth.rst b/docs/guides/oauth.rst new file mode 100644 index 000000000..d4c5d3d46 --- /dev/null +++ b/docs/guides/oauth.rst @@ -0,0 +1,142 @@ +OAuth Integration +================= + +Overview +-------- + +OAuth 2 is an open authentication protocol that describes how users can safely +grant third-party applications access to some or all of their accounts with +service providers. Linode implements OAuth 2 with `https://login.linode.com`_, +allowing third-party developers worlds of possibilities when integrating with +Linode's service. By making an OAuth application, you can allow users to +grant your app access to create, install, configure, and manage infrastructure +on their behalf. + +.. 
_`https://login.linode.com`: https://login.linode.com + +.. note:: + If you are simply trying out the API, or if you're writing a command line + tool that accepts a Personal Access Token, you can safely skip this guide. + +The OAuth 2 workflow has three actors: + +.. glossary:: + + end user + The acting user who will log in to the application. + + authentication server + The server that authorizes logins and issues tokens. In this case, it will + be login.linode.com + + client application + The application you are writing, that Linode users will login to through + Linode's OAuth server. You must register OAuth clients at + https://cloud.linode.com or through + :any:`oauth_client_create` + to generate a client ID and client secret (used in the exchange detailed + below). + +The OAuth 2 exchange works as follows: + +#. The end user visits the client application's website and attempts to login + using OAuth. +#. The client application redirects the end user to the authentication server + with the client application's client ID and requested OAuth scopes in the + query string. +#. The end user inputs their credentials to the authorization server and + authorizes the login. +#. The authorization server redirects the end user to the client application + with a temporary exchange code in the query string. +#. The client application issues a request to the authentication server + containing the exchange code and the client application's client secret. +#. The authentication server responds to the client application with a newly + issued OAuth token. + +A working example of completing an OAuth exchange using this library is +available in the example project `Install on Linode`_ + +.. _Install on Linode: https://github.com/linode/linode_api4-python/tree/master/examples/install-on-linode + +.. _oauth_scopes: + +OAuth Scopes +------------ + +OAuth scopes define the level of access your client application has to the +accounts of users who authorize against it. 
While it may be easier to always +request the broadest scopes, this is discouraged as it is more dangerous for +the end user. The end user is presented with the requested scopes during the +authorization process and may choose to abort authorization of your application +based on the scopes requested. + +OAuth scopes are represented by the +:any:`OAuthScopes` class, which can be used to +construct lists of scopes to request. OAuth scopes are divided into +"superscopes," broad categories of entities/actions that may be requested +access to, and "subscopes," the level of access requested to a particular +entity class. For example, if you are writing a frontend to manage +NodeBalancers, you may need access to create and modify NodeBalancers, and also +to list Linode Instances (to display more information about the individual +backends). In this hypothetical case, you would likely want to construct your +requested scopes like this:: + + requested_scopes = [OAuthScopes.NodeBalancer.all, OAuthScopes.Linodes.view] + +Performing an OAuth Login +------------------------- + +The :any:`LinodeLoginClient` class manages all +aspects of the OAuth exchange in this library. To create a +:any:`LinodeLoginClient`, you must use your client ID +and client secret (generated upon registering a client application with Linode - +see above).:: + + login_client = LinodeLoginClient(my_client_id, my_client_secret) + +When a user attempts to login to your application using OAuth, you must issue a +redirect to our authentication server (step 2 above). The +:any:`LinodeLoginClient` handles most of the details +of this for you, returning the complete URL to redirect the end user to:: + + def begin_oauth_login(): + """ + An example function called when a user attempts to login user OAuth. 
+ """ + # generate a URL to redirect the user to, requested full access to their + # account + redirect_to = login_client.generate_login_url(scopes=OAuthScopes.all) + + # use your web framework to redirect the user to the generated URL + return redirect(redirect_to) + +Once the user has authenticated and approved this login, they will be redirected +to the URL configured when your client application was registered. Your web +application must accept this request, and should use it to complete the OAuth +exchange (step 5 above):: + + def oauth_redirect(code=None): + """ + An example callback function when a user authorizes this application. + + :param code: The exchange code provided by the authentication server, + present in the query string of the request. + :type code: str + """ + token, scopes = login_client.finish_oauth(code) + + # token is a valid OAuth token that may be used to construct a + # LinodeClient and access the API on behalf of this user. + +Now that you have been issued a token, be sure to keep it secret and specific +to this user - it should be tied to their session if possible. + +Logging Out +----------- + +When a user logs out of their account, you *must* call +:any:`LoginClient.expire_token` with the +token issued to your application. This will invalidate the OAuth token the +user generated by logging in, which will completely revoke their session. +Simply invalidating their session in your application and leaving their OAuth +token active is *not* a complete logout, and should be avoided. diff --git a/docs/guides/upgrading_from_linode-api.rst b/docs/guides/upgrading_from_linode-api.rst new file mode 100644 index 000000000..af501e00b --- /dev/null +++ b/docs/guides/upgrading_from_linode-api.rst @@ -0,0 +1,103 @@ +Upgrading from linode-api +========================= + +.. package:: linode_api4 + +.. highlight:: python + +This library was previously released as ``linode-api``, which is still +available as a `branch on the linode_api4 github`_. 
If you used the +``linode-api`` package previously and would like to upgrade your scripts +to use ``linode_api4``, this guide is for you. + +.. _branch on the linode_api4 github: https://github.com/linode/linode_api4-python/tree/linode-api + +New Dependency and Imports +-------------------------- + +This package is now called ``linode_api4``. In any ``setup.py`` or +``requirements.txt`` you have in your project, change ``linode-api`` to +``linode_api4``. + +The module you import in your classes has changed from ``linode`` to +``linode_api4`` to match the package name. + +Renamed Linode Class +-------------------- + +The ``Linode`` class has been renamed to :any:`Instance` to match the upstream +naming convention. If your script looks like this:: + + from linode import LinodeClient, Linode + + client = LinodeClient(token) + linode = client.load(Linode, 123) + +You should change it to this:: + + from linode_api4 import LinodeClient, Instance + + client = LinodeClient(token) + instance = client.load(Instance, 123) + +New Method Naming Scheme +------------------------ + +Methods used to retrieve or create objects now follow a "noun-verb" convention +instead of the previous "verb-noun" convention. For example, the +``create_domain`` method to create a new :any:`Domain` is now +``domain_create``. + +Additionally, the ``get_`` prefix has been dropped from methods returning lists +of objects. The ``domains`` method replaces the old method name ``get_domains``. 
+ +If your code looked like this:: + + from linode import LinodeClient, Linode + + client = LinodeClient(token) + + linodes = client.linode.get_instances() + print("You have {} Linodes".format(len(linodes))) + + new_linode, password = client.linode.create_instance('g6-standard-2', 'us-east', + image='linode/debian9') + + print("Now you have {} Linodes".format(len(linodes)+1)) + print("Your new Linode's ip address is {}".format(new_linode.ipv4[0])) + +It would be changed to this:: + + from linode_api4 import LinodeClient, Instance + + client = LinodeClient(token) + + instances = client.linode.instances() + print("You have {} Linode Instances".format(len(instances))) + + new_instance, password = client.linode.instance_create('g6-standard-2', 'us-east', + image='linode/debian9') + + print("Now you have {} Linode Instances".format(len(instances)+1)) + print("Your new Instance's ip address is {}".format(new_instance.ipv4[0])) + +New Package Structure +--------------------- + +.. note:: + The imports that need to be changed were never the recommended way of + importing classes, and all recommended, documented import schemes still work + without change. + +In the unlikely case that you are importing classes from deep within the +``linode.objects`` package, you may need to change your imports to match the +new package structure. For example, if your code currently does this:: + + from linode import LinodeClient + from linode.objects.linode.linode import Linode + from linode.objects.linode.disk import Disk + +You will need to change it to this:: + + from linode import LinodeClient + from linode.objects.linode import Instance, Disk diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 000000000..1faf5dfa8 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,42 @@ +linode_api4 +=========== + +This is the documentation for the official Python bindings of the Linode +API v4. For API documentation, see `techdocs.akamai.com`_. 
+ +This library can be used to interact with all features of the Linode API. + +.. _techdocs.akamai.com: https://techdocs.akamai.com/linode-api/reference/api + +Installation +------------ + +To install through pypi:: + + pip install linode_api4 + +To install from source:: + + git clone https://github.com/linode/linode_api4-python + cd linode_api4 + python -m pip install . + +For more information, see our :doc:`Getting Started` +guide. + +Table of Contents +----------------- + +.. toctree:: + :maxdepth: 2 + + guides/getting_started + guides/core_concepts + guides/event_polling + guides/oauth + linode_api4/linode_client + linode_api4/login_client + linode_api4/objects/models + linode_api4/polling + linode_api4/paginated_list + linode_api4/objects/filtering diff --git a/docs/linode_api4/linode_client.rst b/docs/linode_api4/linode_client.rst new file mode 100644 index 000000000..8a602f1c8 --- /dev/null +++ b/docs/linode_api4/linode_client.rst @@ -0,0 +1,237 @@ +Linode Client +============= + +.. module:: linode_api4 + +The LinodeClient is responsible for managing your connection to the API using +your token. A LinodeClient is required for all connections to the API, and a +reference to one is required by every model. A LinodeClient is created with a +token, either an OAuth Token from the OAuth Exchange (see +:doc:`oauth<../guides/oauth>` for more information) or a Personal Access Token. 
+See our :doc:`getting_started<../guides/getting_started>` guide for more +information:: + + from linode_api4 import LinodeClient + + token = "api-token" # your token goes here + + client = LinodeClient(token) + +Grouping +-------- + +The LinodeClient class is divided into groups following the API's overall +design - some methods and functions are accessible only through members of the +LinodeClient class:: + + # access an ungrouped member + client.regions() # /regions + + # access a grouped member - note the URL matches the grouping + client.linode.instances() # /linode/instances + +The LinodeClient itself holds top-level collections of the API, while anything +that exists under a group in the API belongs to a member of the client. + +LinodeClient class +------------------ + +.. autoclass:: LinodeClient + :members: + + .. automethod:: __init__ + +Groups +------ + +These groups are accessed off of the :any:`LinodeClient` class by name. For +example:: + + client.linode.instances() + +See :any:`LinodeClient` for more information on the naming of these groups, +although generally they are named the same as the first word of the group. + +AccountGroup +^^^^^^^^^^^^ + +Includes methods for managing your account. + +.. autoclass:: linode_api4.linode_client.AccountGroup + :members: + :special-members: + +BetaGroup +^^^^^^^^^ + +Includes methods for enrolling in beta programs. + +.. autoclass:: linode_api4.linode_client.BetaGroup + :members: + :special-members: + +DatabaseGroup +^^^^^^^^^^^^^ + +Includes methods for managing Linode Managed Databases. + +.. autoclass:: linode_api4.linode_client.DatabaseGroup + :members: + :special-members: + +DomainGroup +^^^^^^^^^^^ + +Includes methods for managing Linode Domains. + +.. autoclass:: linode_api4.linode_client.DomainGroup + :members: + :special-members: + +ImageGroup +^^^^^^^^^^ + +Includes methods for managing Linode Images. + +.. 
autoclass:: linode_api4.linode_client.ImageGroup
+    :members:
+    :special-members:
+
+LinodeGroup
+^^^^^^^^^^^
+
+Includes methods for managing and creating Linode Instances, as well as
+accessing and working with associated features.
+
+.. autoclass:: linode_api4.linode_client.LinodeGroup
+    :members:
+    :special-members:
+
+LKEGroup
+^^^^^^^^^
+
+Includes methods for interacting with Linode Kubernetes Engine.
+
+.. autoclass:: linode_api4.linode_client.LKEGroup
+    :members:
+    :special-members:
+
+LongviewGroup
+^^^^^^^^^^^^^
+
+Includes methods for interacting with our Longview service.
+
+.. autoclass:: linode_api4.linode_client.LongviewGroup
+    :members:
+    :special-members:
+
+LockGroup
+^^^^^^^^^^^^^
+
+Includes methods for interacting with our Lock service.
+
+.. autoclass:: linode_api4.linode_client.LockGroup
+    :members:
+    :special-members:
+
+NetworkingGroup
+^^^^^^^^^^^^^^^
+
+Includes methods for managing your networking systems.
+
+.. autoclass:: linode_api4.linode_client.NetworkingGroup
+    :members:
+    :special-members:
+
+NodeBalancerGroup
+^^^^^^^^^^^^^^^^^
+
+Includes methods for managing Linode NodeBalancers.
+
+.. autoclass:: linode_api4.linode_client.NodeBalancerGroup
+    :members:
+    :special-members:
+
+ObjectStorageGroup
+^^^^^^^^^^^^^^^^^^
+
+Includes methods for interacting with Linode Objects Storage. For interacting
+with buckets and objects, use the s3 API directly with a library like `boto3`_.
+
+.. autoclass:: linode_api4.linode_client.ObjectStorageGroup
+    :members:
+    :special-members:
+
+.. _boto3: https://github.com/boto/boto3
+
+PlacementAPIGroup
+^^^^^^^^^^^^^^^^^
+
+Includes methods related to VM placement.
+
+.. autoclass:: linode_api4.linode_client.PlacementAPIGroup
+    :members:
+    :special-members:
+
+PollingGroup
+^^^^^^^^^^^^
+
+Includes methods related to account event polling.
+
+.. autoclass:: linode_api4.linode_client.PollingGroup
+    :members:
+    :special-members:
+
+ProfileGroup
+^^^^^^^^^^^^
+
+Includes methods for managing your user.
+
+.. 
autoclass:: linode_api4.linode_client.ProfileGroup + :members: + :special-members: + +RegionGroup +^^^^^^^^^^^ + +Includes methods for accessing information about Linode Regions. + +.. autoclass:: linode_api4.linode_client.RegionGroup + :members: + :special-members: + +SupportGroup +^^^^^^^^^^^^ + +Includes methods for viewing and opening tickets with our support department. + +.. autoclass:: linode_api4.linode_client.SupportGroup + :members: + :special-members: + +TagGroup +^^^^^^^^ + +Includes methods for managing Linode Tags. + +.. autoclass:: linode_api4.linode_client.TagGroup + :members: + :special-members: + +VolumeGroup +^^^^^^^^^^^ + +Includes methods for managing Linode Volumes. + +.. autoclass:: linode_api4.linode_client.VolumeGroup + :members: + :special-members: + +VPCGroup +^^^^^^^^ + +Includes methods for managing Linode VPCs. + +.. autoclass:: linode_api4.linode_client.VPCGroup + :members: + :special-members: diff --git a/docs/linode_api4/login_client.rst b/docs/linode_api4/login_client.rst new file mode 100644 index 000000000..36e680f68 --- /dev/null +++ b/docs/linode_api4/login_client.rst @@ -0,0 +1,32 @@ +Linode Login Client +=================== + +.. module:: linode_api4 + +The :any:`LinodeLoginClient` is the primary interface to the +`login.linode.com`_ OAuth service, and only needs to be used if writing an +OAuth application. For an example OAuth application, see `Install on Linode`_, +and for a more comprehensive overview of OAuth, read our :doc:`OAuth +guide<../guides/oauth>`. + +.. _login.linode.com: https://login.linode.com +.. _Install on Linode: https://github.com/linode/linode_api4-python/tree/master/examples/install-on-linode + +LinodeLoginClient class +----------------------- + +Your interface to Linode's OAuth authentication server. + +.. autoclass:: linode_api4.LinodeLoginClient + :members: + + .. 
automethod:: __init__
+
+OAuth Scopes
+------------
+
+When requesting authorization to a user's account, OAuth Scopes allow you to
+specify the level of access you are requesting.
+
+.. autoclass:: linode_api4.login_client.OAuthScopes
+    :members:
diff --git a/docs/linode_api4/objects/filtering.rst b/docs/linode_api4/objects/filtering.rst
new file mode 100644
index 000000000..df88d6e66
--- /dev/null
+++ b/docs/linode_api4/objects/filtering.rst
@@ -0,0 +1,5 @@
+Filtering Collections
+=====================
+
+.. automodule:: linode_api4.objects.filtering
+    :members:
diff --git a/docs/linode_api4/objects/models.rst b/docs/linode_api4/objects/models.rst
new file mode 100644
index 000000000..8cef969c6
--- /dev/null
+++ b/docs/linode_api4/objects/models.rst
@@ -0,0 +1,168 @@
+Models
+======
+
+This page documents all models made available by the linode_api4 library. These
+models can be looked up or created as described in the
+:doc:`Core Concepts` document.
+
+Account Models
+--------------
+
+.. automodule:: linode_api4.objects.account
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Beta Models
+-----------
+
+.. automodule:: linode_api4.objects.beta
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Database Models
+---------------
+
+.. automodule:: linode_api4.objects.database
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Domain Models
+-------------
+
+.. automodule:: linode_api4.objects.domain
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Image Models
+------------
+
+.. 
automodule:: linode_api4.objects.image
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Linode Models
+-------------
+
+.. automodule:: linode_api4.objects.linode
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+LKE Models
+----------
+
+.. automodule:: linode_api4.objects.lke
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Longview Models
+---------------
+
+.. automodule:: linode_api4.objects.longview
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Networking Models
+-----------------
+
+.. automodule:: linode_api4.objects.networking
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Nodebalancer Models
+-------------------
+
+.. automodule:: linode_api4.objects.nodebalancer
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Object Storage Models
+---------------------
+
+.. automodule:: linode_api4.objects.object_storage
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Placement Models
+----------------
+
+.. automodule:: linode_api4.objects.placement
+    :members:
+    :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name
+    :undoc-members:
+    :inherited-members:
+
+Profile Models
+--------------
+
+.. 
automodule:: linode_api4.objects.profile + :members: + :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name + :undoc-members: + :inherited-members: + +Region Models +------------- + +.. automodule:: linode_api4.objects.region + :members: + :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name + :undoc-members: + :inherited-members: + +Support Models +-------------- + +.. automodule:: linode_api4.objects.support + :members: + :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name + :undoc-members: + :inherited-members: + +Tag Models +---------- + +.. automodule:: linode_api4.objects.tag + :members: + :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name + :undoc-members: + :inherited-members: + +Volume Models +------------- + +.. automodule:: linode_api4.objects.volume + :members: + :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name + :undoc-members: + :inherited-members: + +VPC Models +---------- + +.. automodule:: linode_api4.objects.vpc + :members: + :exclude-members: api_endpoint, properties, derived_url_path, id_attribute, parent_id_name + :undoc-members: + :inherited-members: diff --git a/docs/linode_api4/paginated_list.rst b/docs/linode_api4/paginated_list.rst new file mode 100644 index 000000000..01cbfe0f2 --- /dev/null +++ b/docs/linode_api4/paginated_list.rst @@ -0,0 +1,41 @@ +Pagination +========== + +The Linode API V4 returns collections of resources one page at a time. 
While
+this is useful, this library abstracts away the details of pagination and makes
+collections of resources appear as a single, uniform list that can be accessed,
+iterated over, and indexed as any normal Python list would be::
+
+   regions = client.regions() # get a collection of Regions
+
+   for region in regions:
+       print(region.id)
+
+   first_region = regions[0]
+   last_region = regions[-1]
+
+Pagination is handled transparently, and as requested. For example, if you had
+three pages of Linode Instances, accessing your collection of Instances would
+behave like this::
+
+   instances = client.linode.instances() # loads the first page only
+
+   instances[0] # no additional data is loaded
+
+   instances[-1] # third page is loaded to retrieve the last Linode in the collection
+
+   for instance in instances:
+       # the second page will be loaded as soon as the first Linode on that page
+       # is required. The first and third pages are already loaded, and will not
+       # be loaded again.
+       print(instance.label)
+
+The first page of a collection is always loaded when the collection is
+returned, and subsequent pages are loaded as they are required. When slicing
+a paginated list, only the pages required for the slice are loaded.
+
+PaginatedList class
+-------------------
+
+.. autoclass:: linode_api4.PaginatedList
+    :members: first, only, last
diff --git a/docs/linode_api4/polling.rst b/docs/linode_api4/polling.rst
new file mode 100644
index 000000000..6f5d956ea
--- /dev/null
+++ b/docs/linode_api4/polling.rst
@@ -0,0 +1,12 @@
+Event Polling
+=============
+
+This project exposes a framework for dynamically polling on long-running Linode Events.
+
+See the :doc:`Event Polling Guide<../guides/event_polling>` for more details.
+
+EventPoller class
+-------------------
+
+.. 
autoclass:: linode_api4.EventPoller + :members: diff --git a/e2e_scripts b/e2e_scripts new file mode 160000 index 000000000..3265074d0 --- /dev/null +++ b/e2e_scripts @@ -0,0 +1 @@ +Subproject commit 3265074d0d7ff8db6ce5207084051e1fc45d0763 diff --git a/examples/install-on-linode/README.md b/examples/install-on-linode/README.md index 4f84afae7..7fe3463c9 100644 --- a/examples/install-on-linode/README.md +++ b/examples/install-on-linode/README.md @@ -1,33 +1,31 @@ # Install on Linode -A sample application for the official [linode python library](https://github.com/linode/python-linode-api). +A sample application for the official [Linode Python Library](https://github.com/linode/linode_api4-python). **Install on Linode** demonstrates a multi-user application developed with -the Linode API V4 - users arrive at a third-party application, and are asked +the Linode API - users arrive at a third-party application, and are asked to authorize the application to make changes to their account, which are then executed and reported to the user. In this example, the third-party application -uses the `linodes:*` oauth scope to deploy a stackscript to a new linode. +uses the `linodes:*` OAuth scope to deploy a StackScript to a new Linode Instance. ### How to Use This project is very bare-bones to keep it simple and focused on the core concepts being demonstrated. It relies on Flask and Flask-Login, as well -as the `linode-api` package, and does not require any external services. All +as the `linode_api4` package, and does not require any external services. All of the logic lives in app.py, with all configuration in config.py (not included in the repository, see instructions below). 
To set up: + * Install the required packages (see requirements.txt) - * Copy config.py.example to config.py and populate values - * You will need to go to [login.alpha.linode.com](http://login.alpha.linode.com) - and create a new oauth client to get your client ID and client secret - when - registering your application, if running this locally, set the redirect uri - to `localhost:5000/auth_callback`. - * You will need to create a public stackscript to use for this application, - or else pick an existing public stackscript. You will need to take its - stackscript ID in the linode Linode API V4 ID format: `stackscript_123` for example. - You can run the utility script `./create_stackscript.py` to make a (blank) - stackscript suitable for running this. + * Copy config.py.example to config.py and populate values: + * You will need an OAuth Client created in [the Linode Manager](https://cloud.linode.com/profile/clients). + When prompted, ensure that the "redirect_uri" is `http://localhost:5000/auth_callback`, + and leave "Public" unchecked. + * You will need a public stackscript to use this application - either use the + default ID provided (320826), or replace it with the ID returned by the + `make_stackscript.py` script included here. * Run the application with `python3 app.py` ### Concepts Demonstrated @@ -38,22 +36,22 @@ Please note that in the future, users may be able to select what scopes they gra an application, so you should always check to make sure you are granted what your application needs in order to run. -**Linode Creation** - This application creates a linode for the user with a specific +**Instance Creation** - This application creates a linode for the user with a specific setup, configured in part by the user and in part by the program. In this case, the application will install the owner's application on the new linode and provide information on how to access the newly-created server. 
-**Unauthenticated Services** - This application accesses several public endpoints of the
-Linode API V4, includes `/kernels`, `/regions`, and a single public stackscript
-(presumably controlled by the application's author). The stackscript needs to be public
-so that the authenticated user's account can access it in order to install it on the linode.
+**Unauthenticated Services** - This application accesses several public functions of the
+Linode API, including `linode.kernels()`, `regions()`, and a single public StackScript
+(presumably controlled by the application's author). The StackScript needs to be public
+so that the authenticated user's account can access it in order to install it on the Instance.
 
-**Object Retreival** - This application retrieves objects from the Linode API V4 in two ways:
+**Object Retrieval** - This application retrieves objects from the Linode API in two ways:
 both as a list, and as a single requested object. Lists are retrieved by asking the
-`LinodeClient` for a list of related objects, like `client.get_regions()`, while
+`LinodeClient` for a list of related objects, like `client.regions()`, while
 individual objects that we already know the ID for and will not change can be accessed by
-creating a new instace of the correct type with the known ID. For this to work, the
-user whose token is being used must have access to the contstruted object.
+creating a new instance of the correct type with the known ID. For this to work, the
+user whose token is being used must have access to the constructed object.
### Disclaimer diff --git a/examples/install-on-linode/app.py b/examples/install-on-linode/app.py index 42aafc0d0..e203973e1 100644 --- a/examples/install-on-linode/app.py +++ b/examples/install-on-linode/app.py @@ -1,21 +1,32 @@ import re from flask import Flask, redirect, request, render_template, session, send_from_directory -from flask.ext.session import Session -from linode import LinodeClient, LinodeLoginClient, StackScript, Distribution, Region -from linode import Service, OAuthScopes +from linode_api4 import (LinodeClient, LinodeLoginClient, StackScript, Image, Region, + Type, OAuthScopes) import config +# define our flask app app=Flask(__name__) app.config['SECRET_KEY'] = config.secret_key + def get_login_client(): - return LinodeLoginClient(config.client_id, config.client_secret, base_url=config.login_base_url) + """ + Returns a LinodeLoginClient configured as per the config module in this + example project. + """ + return LinodeLoginClient(config.client_id, config.client_secret) + @app.route('/') def index(): - client = LinodeClient('no-token', base_url=config.api_base_url) - types = client.linode.get_types(Service.label.contains("Linode")) - regions = client.get_regions() + """ + This route renders the main page, where users land when visiting the example + site normally. This will present a simple form to deploy a Linode and allow + them to submit the forum. + """ + client = LinodeClient('no-token') + types = client.linode.types(Type.label.contains("Linode")) + regions = client.regions() stackscript = StackScript(client, config.stackscript_id) return render_template('configure.html', types=types, @@ -24,27 +35,49 @@ def index(): stackscript=stackscript ) + @app.route('/', methods=["POST"]) def start_auth(): + """ + This route is called when the forum rendered by GET / is submitted. This + will store the selections in the Flaks session before redirecting to + login.linode.com to log into configured OAuth Client. 
+ """ login_client = get_login_client() session['dc'] = request.form['region'] session['distro'] = request.form['distribution'] session['type'] = request.form['type'] - return redirect(login_client.generate_login_url(scopes=OAuthScopes.Linodes.all)) + return redirect(login_client.generate_login_url(scopes=OAuthScopes.Linodes.read_write)) + @app.route('/auth_callback') def auth_callback(): + """ + This route is where users who log in to our OAuth Client will be redirected + from login.linode.com; it is responsible for completing the OAuth Workflow + using the Exchange Code provided by the login server, and then proceeding with + application logic. + """ + # complete the OAuth flow by exchanging the Exchange Code we were given + # with login.linode.com to get a working OAuth Token that we can use to + # make requests on the user's behalf. code = request.args.get('code') login_client = get_login_client() - token, scopes = login_client.finish_oauth(code) + token, scopes, _, _ = login_client.finish_oauth(code) - # ensure we have sufficient scopes - if not OAuthScopes.Linodes.delete in scopes: + # ensure we were granted sufficient scopes - this is a best practice, but + # at present users cannot elect to give us lower scopes than what we requested. + # In the future they may be allowed to grant partial access. + if not OAuthScopes.Linodes.read_write in scopes: return render_template('error.html', error='Insufficient scopes granted to deploy {}'\ .format(config.application_name)) - (linode, password) = create_linode(token, session['type'], session['dc'], session['distro']) + # application logic - create the linode + (linode, password) = make_instance(token, session['type'], session['dc'], session['distro']) + # expire the OAuth Token we were given, effectively logging the user out of + # of our application. 
While this isn't strictly required, it's a good + # practice when the user is done (normally when clicking "log out") get_login_client().expire_token(token) return render_template('success.html', password=password, @@ -52,19 +85,23 @@ def auth_callback(): application_name=config.application_name ) -def create_linode(token, type_id, region_id, distribution_id): - client = LinodeClient('{}'.format(token), base_url=config.api_base_url) + +def make_instance(token, type_id, region_id, distribution_id): + """ + A helper function to create a Linode with the selected fields. + """ + client = LinodeClient('{}'.format(token)) stackscript = StackScript(client, config.stackscript_id) - (linode, password) = client.linode.create_instance(type_id, region_id, + (linode, password) = client.linode.instance_create(type_id, region_id, group=config.application_name, - distribution=distribution_id, stackscript=stackscript.id) + image=distribution_id, stackscript=stackscript.id) if not linode: raise RuntimeError("it didn't work") - - linode.boot() return linode, password + +# This actually starts the application when app.py is run if __name__ == '__main__': app.debug=True app.run() diff --git a/examples/install-on-linode/config.py.example b/examples/install-on-linode/config.py.example index b62058aec..9bd4ed248 100644 --- a/examples/install-on-linode/config.py.example +++ b/examples/install-on-linode/config.py.example @@ -5,9 +5,9 @@ control. OAuth Client Details ==================== -Obtain these values from login.alpha.linode.com -See Authentication (https://developers.linode.com/reference/#authentication) -for details +These values are obtained by creating a new OAuth Client on +https://cloud.linode.com/profile/clients - see the README included here for +more information. 
""" client_id = 'my-client-id' client_secret = 'my-client-secret' @@ -16,7 +16,7 @@ client_secret = 'my-client-secret' Application Details =================== stackscirpt_id - the stackscript to deploy on Linodes we are creating in -this example application. Run ./create_stackscript.py to generate a public +this example application. Run ./make_stackscript.py to generate a public stackscript and put the ID it returns here. application_name - displayed to the user of this example application and @@ -25,15 +25,6 @@ used in the new Linode's label. Can be any string. secret_key - this flask application's secret key. Not very important since this is an example application and not for production deployment. """ -stackscript_id = 'my-stackscript-id' +stackscript_id = 320826 application_name = 'my-application-name' secret_key = 'my-secret-key' - -""" -Environment Configuration -========================= -These values set the URLs for the LinodeClient and LinodeLoginClient. For -most purposes, these should not need to change. 
-""" -api_base_url = 'https://api.alpha.linode.com/v4' -login_base_url = 'https://login.alpha.linode.com' diff --git a/examples/install-on-linode/create_stackscript.py b/examples/install-on-linode/create_stackscript.py deleted file mode 100755 index 4299feb01..000000000 --- a/examples/install-on-linode/create_stackscript.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/local/bin/python3 - -from linode import LinodeClient -import config - -token = input("Please provide an OAuth Token: ") -client = LinodeClient(token, base_url=config.api_base_url) -s = client.linode.create_stackscript('Demonstration_Public', '#!/bin/bash', - client.linode.get_distributions(), is_public=True) -print("StackScript created, use this ID: {}".format(s.id)) diff --git a/examples/install-on-linode/make_stackscript.py b/examples/install-on-linode/make_stackscript.py new file mode 100755 index 000000000..89597f325 --- /dev/null +++ b/examples/install-on-linode/make_stackscript.py @@ -0,0 +1,10 @@ +#!/usr/local/bin/python3 + +from linode_api4 import LinodeClient, Image +import config + +token = input("Please provide an OAuth Token: ") +client = LinodeClient(token) +s = client.linode.stackscript_create('Demonstration_Public', '#!/bin/bash', + client.images(Image.is_public==True), is_public=True) +print("StackScript created, use this ID: {}".format(s.id)) diff --git a/examples/install-on-linode/requirements.txt b/examples/install-on-linode/requirements.txt index 4915059c4..9d1aebfa2 100644 --- a/examples/install-on-linode/requirements.txt +++ b/examples/install-on-linode/requirements.txt @@ -1,3 +1,2 @@ -linode-api +linode_api4 Flask -Flask-Session diff --git a/examples/install-on-linode/templates/configure.html b/examples/install-on-linode/templates/configure.html index 92352dd75..92221e607 100644 --- a/examples/install-on-linode/templates/configure.html +++ b/examples/install-on-linode/templates/configure.html @@ -23,16 +23,16 @@

Deploy {{application_name}} to a Linode
- +
diff --git a/examples/install-on-linode/templates/success.html b/examples/install-on-linode/templates/success.html index 2c2f5553c..b20e949ec 100644 --- a/examples/install-on-linode/templates/success.html +++ b/examples/install-on-linode/templates/success.html @@ -7,7 +7,7 @@

Success!

You can access your linode with the following command:

- ssh root@{{linode.ipv4}} + ssh root@{{linode.ipv4[0]}}

Your root password is:

diff --git a/linode/__init__.py b/linode/__init__.py deleted file mode 100644 index 87da2a5f7..000000000 --- a/linode/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from linode.objects import * -from linode.errors import ApiError, UnexpectedResponseError -from linode.util import * -from linode.linode_client import LinodeClient -from linode.login_client import LinodeLoginClient, OAuthScopes diff --git a/linode/errors.py b/linode/errors.py deleted file mode 100644 index 14e60e9e5..000000000 --- a/linode/errors.py +++ /dev/null @@ -1,28 +0,0 @@ -from linode import mappings - -class ApiError(RuntimeError): - """ - An API Error is any error returned from the API. These - typically have a status code in the 400s or 500s. Most - often, this will be caused by invalid input to the API. - """ - def __init__(self, message, status=400, json=None): - super(RuntimeError, self).__init__(message) - self.status = status - self.json = json - self.errors = [] - if json and 'errors' in json and isinstance(json['errors'], list): - self.errors = [ e['reason'] for e in json['errors'] ] - -class UnexpectedResponseError(RuntimeError): - """ - An Unexpected Response Error occurs when the API returns - something that this library is unable to parse, usually - because it expected something specific and didn't get it. - These typically indicate an oversight in developing this - library, and should be fixed with changes to this codebase. 
- """ - def __init__(self, message, status=200, json=None): - super(RuntimeError, self).__init__(message) - self.status = status - self.json = json diff --git a/linode/linode_client.py b/linode/linode_client.py deleted file mode 100644 index 391a515d0..000000000 --- a/linode/linode_client.py +++ /dev/null @@ -1,460 +0,0 @@ -import json -import requests -import pkg_resources -from datetime import datetime - -from linode.errors import ApiError, UnexpectedResponseError -from linode import mappings -from linode.objects import * -from linode.objects.filtering import Filter -from linode.util import PaginatedList - -package_version = pkg_resources.require("linode-api")[0].version, - -class Group: - def __init__(self, client): - self.client = client - -class LinodeGroup(Group): - def get_distributions(self, *filters): - return self.client._get_and_filter(Distribution, *filters) - - def get_types(self, *filters): - return self.client._get_and_filter(Service, *filters) - - def get_instances(self, *filters): - return self.client._get_and_filter(Linode, *filters) - - def get_stackscripts(self, *filters, **kwargs): - # python2 can't handle *args and a single keyword argument, so this is a workaround - if 'mine_only' in kwargs: - if kwargs['mine_only']: - new_filter = Filter({"mine":True}) - if filters: - filters = [ f for f in filters ] - filters[0] = filters[0] & new_filter - else: - filters = [new_filter] - - del kwargs['mine_only'] - - if kwargs: - raise TypeError("get_stackscripts() got unexpected keyword argument '{}'".format(kwargs.popitem()[0])) - - return self.client._get_and_filter(StackScript, *filters) - - def get_kernels(self, *filters): - return self.client._get_and_filter(Kernel, *filters) - - # create things - def create_instance(self, ltype, region, distribution=None, **kwargs): - ret_pass = None - if distribution and not 'root_pass' in kwargs: - ret_pass = Linode.generate_root_password() - kwargs['root_pass'] = ret_pass - - if 'root_ssh_key' in kwargs: - 
root_ssh_key = kwargs['root_ssh_key'] - accepted_types = ('ssh-dss', 'ssh-rsa', 'ecdsa-sha2-nistp', 'ssh-ed25519') - if not any([ t for t in accepted_types if root_ssh_key.startswith(t) ]): - # it doesn't appear to be a key.. is it a path to the key? - import os - root_ssh_key = os.path.expanduser(root_ssh_key) - if os.path.isfile(root_ssh_key): - with open(root_ssh_key) as f: - kwargs['root_ssh_key'] = "".join([ l.strip() for l in f ]) - else: - raise ValueError('root_ssh_key must either be a path to the key file or a ' - 'raw public key of one of these types: {}'.format(accepted_types)) - - params = { - 'type': ltype.id if issubclass(type(ltype), Base) else ltype, - 'region': region.id if issubclass(type(region), Base) else region, - 'distribution': (distribution.id if issubclass(type(distribution), Base) else distribution) if distribution else None, - } - params.update(kwargs) - - result = self.client.post('/linode/instances', data=params) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating linode!', json=result) - - l = Linode(self.client, result['id']) - l._populate(result) - if not ret_pass: - return l - else: - return l, ret_pass - - def create_stackscript(self, label, script, distros, desc=None, public=False, **kwargs): - distro_list = None - if type(distros) is list or type(distros) is PaginatedList: - distro_list = [ d.id if issubclass(type(d), Base) else d for d in distros ] - elif type(distros) is Distribution: - distro_list = [ distros.id ] - elif type(distros) is str: - distro_list = [ distros ] - else: - raise ValueError('distros must be a list of Distributions or a single Distribution') - - script_body = script - if not script.startswith("#!"): - # it doesn't look like a stackscript body, let's see if it's a file - import os - if os.path.isfile(script): - with open(script) as f: - script_body = f.read() - else: - raise ValueError("script must be the script text or a path to a file") - - params = { - "label": 
label, - "distributions": distro_list, - "is_public": public, - "script": script_body, - "description": desc if desc else '', - } - params.update(kwargs) - - result = self.client.post('/linode/stackscripts', data=params) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating StackScript!', json=result) - - s = StackScript(self.client, result['id']) - s._populate(result) - return s - -class AccountGroup(Group): - def get_events(self, *filters): - return self.client._get_and_filter(Event, *filters) - - def mark_last_seen_event(self, event): - """ - Marks event as the last event we have seen. If event is an int, it is treated - as an event_id, otherwise it should be an event object whose id will be used. - """ - last_seen = event if isinstance(event, int) else event.id - self.client.post('{}/seen'.format(Event.api_endpoint), model=Event(self.client, last_seen)) - - def get_profile(self): - """ - Returns this token's user's profile. This is not a listing endpoint. - """ - result = self.client.get('/account/profile') - - if not 'username' in result: - raise UnexpectedResponseError('Unexpected response when getting profile!', json=result) - - p = Profile(self.client, result['username']) - p._populate(result) - return p - - def get_settings(self): - """ - Resturns the account settings data for this acocunt. This is not a - listing endpoint. 
- """ - result = self.client.get('/account/settings') - - if not 'email' in result: - raise UnexpectedResponseError('Unexpected response when getting account settings!', - json=result) - - s = AccountSettings(self.client, result['email']) - s._populate(result) - return s - - def get_oauth_clients(self, *filters): - """ - Returns the OAuth Clients associated to this account - """ - return self.client._get_and_filter(OAuthClient, *filters) - - def create_oauth_client(self, name, redirect_uri, **kwargs): - """ - Make a new OAuth Client and return it - """ - params = { - "label": name, - "redirect_uri": redirect_uri, - } - params.update(kwargs) - - result = self.client.post('/account/clients', data=params) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating OAuth Client!', - json=result) - - c = OAuthClient(self.client, result['id']) - c._populate(result) - return c - - def get_oauth_tokens(self, *filters): - """ - Returns the OAuth Tokens active for this user - """ - return self.client._get_and_filter(OAuthToken, *filters) - - def create_personal_access_token(self, label=None, expiry=None, scopes=None, **kwargs): - """ - Creates and returns a new Personal Access Token - """ - if label: - kwargs['label'] = label - if expiry: - if isinstance(expiry, datetime): - expiry = datetime.strftime(expiry, "%Y-%m-%dT%H:%M:%S") - kwargs['expiry'] = expiry - if scopes: - kwargs['scopes'] = scopes - - result = self.client.post('/account/tokens', data=kwargs) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating Personal Access ' - 'Token!', json=result) - - t = OAuthToken(self.client, result['id']) - t._populate(result) - return t - - def get_users(self, *filters): - """ - Returns a list of users on this account - """ - return self.client._get_and_filter(User, *filters) - -class NetworkingGroup(Group): - def get_ipv4(self, *filters): - return self.client._get_and_filter(IPAddress, *filters) - - def 
get_ipv6_ranges(self, *filters): - return self.client._get_and_filter(IPv6Pool, *filters) - - def assign_ips(self, region, *assignments): - """ - This takes a set of IPv4 Assignments and moves the IPs where they were - asked to go. Call this with any number of IPAddress.to(Linode) results - - For example, swapping ips between linode1 and linode2 might look like this: - client.networking.assign_ips('newark', ip1.to(linode2), ip2.to(linode1)) - - """ - for a in assignments: - if not 'address' in a or not 'linode_id' in a: - raise ValueError("Invalid assignment: {}".format(a)) - if isinstance(region, Region): - region = region.id - - result = self.client.post('/networking/ip-assign', data={ - "region": region, - "assignments": [ a for a in assignments ], - }) - - if not 'ips' in result: - raise UnexpectedResponseError('Unexpected response when assigning IPs!', - json=result) - - ips = [] - for r in result['ips']: - i = IPAddress(self.client, r['address']) - i._populate(r) - ips.append(i) - - return ips - -class SupportGroup(Group): - def get_tickets(self, *filters): - return self.client._get_and_filter(SupportTicket, *filters) - - def open_ticket(self, summary, description, regarding=None): - """ - - """ - params = { - "summary": summary, - "description": description, - } - - if regarding: - if isinstance(regarding, Linode): - params['linode_id'] = regarding.id - elif isinstance(regarding, Domain): - params['domain_id'] = regarding.id - elif isinstance(regarding, NodeBalancer): - params['nodebalancer_id'] = regarding.id - else: - raise ValueError('Cannot open ticket regarding type {}!'.format(type(regarding))) - - - result = self.client.post('/support/tickets', data=params) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating ticket!', - json=result) - - t = SupportTicket(self.client, result['id']) - t._populate(result) - return t - -class LinodeClient: - def __init__(self, token, base_url="https://api.linode.com/v4", 
user_agent=None): - self.base_url = base_url - self._add_user_agent = user_agent - self.token = token - self.linode = LinodeGroup(self) - self.account = AccountGroup(self) - self.networking = NetworkingGroup(self) - self.support = SupportGroup(self) - - @property - def _user_agent(self): - return '{}python-linode-api/{} {}'.format( - '{} '.format(self._add_user_agent) if self._add_user_agent else '', - package_version, - requests.utils.default_user_agent() - ) - - def _api_call(self, endpoint, model=None, method=None, data=None, filters=None): - """ - Makes a call to the linode api. Data should only be given if the method is - POST or PUT, and should be a dictionary - """ - if not self.token: - raise RuntimeError("You do not have an API token!") - - if not method: - raise ValueError("Method is required for API calls!") - - if model: - endpoint = endpoint.format(**{ k: str(vars(model)[k]) for k in vars(model) if 'id' in k }) - url = '{}{}'.format(self.base_url, endpoint) - headers = { - 'Authorization': "token {}".format(self.token), - 'Content-Type': 'application/json', - 'User-Agent': self._user_agent, - } - - if filters: - headers['X-Filter'] = json.dumps(filters) - - body = json.dumps(data) - - r = method(url, headers=headers, data=body) - - if 399 < r.status_code < 600: - j = None - error_msg = '{}: '.format(r.status_code) - try: - j = r.json() - if 'errors' in j.keys(): - for e in j['errors']: - error_msg += '{}; '.format(e['reason']) \ - if 'reason' in e.keys() else '' - except: - pass - raise ApiError(error_msg, status=r.status_code, json=j) - - j = r.json() - - return j - - def _get_objects(self, endpoint, cls, model=None, parent_id=None, filters=None): - json = self.get(endpoint, model=model, filters=filters) - - if not cls.api_name in json: - return False - - if 'total_pages' in json: - formatted_endpoint = endpoint - if model: - formatted_endpoint = formatted_endpoint.format(**vars(model)) - return mappings.make_paginated_list(json, cls.api_name, self, 
parent_id=parent_id, \ - page_url=formatted_endpoint[1:], cls=cls) - return mappings.make_list(json[cls.api_name], self, parent_id=parent_id, cls=cls) - - def get(self, *args, **kwargs): - return self._api_call(*args, method=requests.get, **kwargs) - - def post(self, *args, **kwargs): - return self._api_call(*args, method=requests.post, **kwargs) - - def put(self, *args, **kwargs): - return self._api_call(*args, method=requests.put, **kwargs) - - def delete(self, *args, **kwargs): - return self._api_call(*args, method=requests.delete, **kwargs) - - # ungrouped list functions - def get_regions(self, *filters): - return self._get_and_filter(Region, *filters) - - def get_domains(self, *filters): - return self._get_and_filter(Domain, *filters) - - def get_nodebalancers(self, *filters): - return self._get_and_filter(NodeBalancer, *filters) - - def create_nodebalancer(self, region, **kwargs): - params = { - "region": region.id if isinstance(region, Base) else region, - } - params.update(kwargs) - - result = self.post('/nodebalancers', data=params) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating Nodebalaner!', json=result) - - n = NodeBalancer(self, result['id']) - n._populate(result) - return n - - def create_domain(self, domain, master=True, **kwargs): - params = { - 'domain': domain, - 'type': 'master' if master else 'slave', - } - params.update(kwargs) - - result = self.post('/domains', data=params) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating Domain!', json=result) - - d = Domain(self, result['id']) - d._populate(result) - return d - - # helper functions - def _filter_list(self, results, **filter_by): - if not results or not len(results): - return results - - if not filter_by or not len(filter_by): - return results - - for key in filter_by.keys(): - if not key in vars(results[0]): - raise ValueError("Cannot filter {} by {}".format(type(results[0]), key)) - if 
isinstance(vars(results[0])[key], Base) and isinstance(filter_by[key], Base): - results = [ r for r in results if vars(r)[key].id == filter_by[key].id ] - elif isinstance(vars(results[0])[key], str) and isinstance(filter_by[key], str): - results = [ r for r in results if filter_by[key].lower() in vars(r)[key].lower() ] - else: - results = [ r for r in results if vars(r)[key] == filter_by[key] ] - - return results - - def _get_and_filter(self, obj_type, *filters): - parsed_filters = None - if filters: - if(len(filters) > 1): - from linode.objects.filtering import and_ - parsed_filters = and_(*filters).dct - else: - parsed_filters = filters[0].dct - - return self._get_objects(obj_type.api_list(), obj_type, filters=parsed_filters) diff --git a/linode/login_client.py b/linode/login_client.py deleted file mode 100644 index f1332343c..000000000 --- a/linode/login_client.py +++ /dev/null @@ -1,181 +0,0 @@ -import requests -from enum import Enum -from linode.errors import ApiError - -try: - from urllib.parse import urlparse - from urllib.parse import urlencode - from urllib.parse import urlunparse -except ImportError: - from urlparse import urlparse - from urllib import urlencode - from urlparse import urlunparse - -class AllWrapper(): - def __repr__(self): - return '*' - -class OAuthScopes: - - all = AllWrapper() - - class Linodes(Enum): - view = 0 - create = 1 - modify = 2 - delete = 3 - all = 4 - - def __repr__(self): - if(self.name == 'all'): - return "linodes:*" - return "linodes:{}".format(self.name) - - class Domains(Enum): - view = 0 - create = 1 - modify = 2 - delete = 3 - all = 4 - - def __repr__(self): - if(self.name == 'all'): - return "domains:*" - return "domains:{}".format(self.name) - - class StackScripts(Enum): - view = 0 - create = 1 - modify = 2 - delete = 3 - all = 4 - - def __repr__(self): - if(self.name == 'all'): - return "stackscripts:*" - return "stackscripts:{}".format(self.name) - - class Users(Enum): - view = 0 - create = 1 - modify = 2 - delete 
= 3 - all = 4 - - def __repr__(self): - if(self.name == 'all'): - return "users:*" - return "users:{}".format(self.name) - - class Tokens(Enum): - view = 0 - create = 1 - modify = 2 - delete = 3 - all = 4 - - def __repr__(self): - if(self.name == 'all'): - return "tokens:*" - return "tokens:{}".format(self.name) - - _scope_families = { - 'linodes': Linodes, - 'domains': Domains, - 'stackscripts': StackScripts, - 'users': Users, - 'tokens': Tokens, - } - - @staticmethod - def parse(scopes): - ret = [] - - # special all-scope case - if scopes == '*': - return [ getattr(OAuthScopes._scope_families[s], 'all') - for s in OAuthScopes._scope_families ] - - for scope in scopes.split(','): - resource = access = None - if ':' in scope: - resource, access = scope.split(':') - else: - resource = scope - access = '*' - - parsed_scope = OAuthScopes._get_parsed_scope(resource, access) - if parsed_scope: - ret.append(parsed_scope) - - return ret - - @staticmethod - def _get_parsed_scope(resource, access): - resource = resource.lower() - access = access.lower() - if resource in OAuthScopes._scope_families: - if access == '*': - access = 'delete' - if hasattr(OAuthScopes._scope_families[resource], access): - return getattr(OAuthScopes._scope_families[resource], access) - - return None - - @staticmethod - def serialize(scopes): - ret = '' - if not type(scopes) is list: - scopes = [ scopes ] - for scope in scopes: - ret += "{},".format(repr(scope)) - if ret: - ret = ret[:-1] - return ret - -class LinodeLoginClient: - def __init__(self, client_id, client_secret, - base_url="https://login.linode.com"): - self.base_url = base_url - self.client_id = client_id - self.client_secret = client_secret - - def _login_uri(self, path): - return "{}{}".format(self.base_url, path) - - def generate_login_url(self, scopes=None, redirect_uri=None): - url = self.base_url + "/oauth/authorize" - split = list(urlparse(url)) - params = { - "client_id": self.client_id, - } - if scopes: - params["scopes"] = 
OAuthScopes.serialize(scopes) - if redirect_uri: - params["redirect_uri"] = redirect_uri - split[4] = urlencode(params) - return urlunparse(split) - - def finish_oauth(self, code): - r = requests.post(self._login_uri("/oauth/token"), data={ - "code": code, - "client_id": self.client_id, - "client_secret": self.client_secret - }) - if r.status_code != 200: - raise ApiError("OAuth token exchange failed", r) - token = r.json()["access_token"] - scopes = OAuthScopes.parse(r.json()["scopes"]) - return token, scopes - - def expire_token(self, token): - r = requests.post(self._login_uri("/oauth/token/expire"), - data={ - "client_id": self.client_id, - "client_secret": self.client_secret, - "token": token, - }) - - if r.status_code != 200: - raise ApiError("Failed to expire token!", r) - return True diff --git a/linode/mappings.py b/linode/mappings.py deleted file mode 100644 index 8d7b08cd0..000000000 --- a/linode/mappings.py +++ /dev/null @@ -1,38 +0,0 @@ -from . import objects -from linode import util - -def make(id, client, parent_id=None, cls=None): - """ - Makes an api object based on an id. The type depends on the mapping. 
- """ - if cls: - if issubclass(cls, objects.DerivedBase): - return cls(client, id, parent_id) - else: - return cls(client, id) - return None - -def make_list(json_arr, client, parent_id=None, cls=None): - result = [] - - for obj in json_arr: - id_val = None - - if 'id' in obj: - id_val = obj['id'] - elif hasattr(cls, 'id_attribute') and getattr(cls, 'id_attribute') in obj: - id_val = obj[getattr(cls, 'id_attribute')] - else: - continue - o = make(id_val, client, parent_id=parent_id, cls=cls) - o._populate(obj) - result.append(o) - - return result - -def make_paginated_list(json, key, client, parent_id=None, page_url=None, cls=None): - l = make_list(json[key], client, parent_id=parent_id, cls=cls) - p = util.PaginatedList(client, page_url if page_url else key, page=l, \ - max_pages=json['total_pages'], total_items=json['total_results'], parent_id=parent_id, \ - key=key) - return p diff --git a/linode/objects/__init__.py b/linode/objects/__init__.py deleted file mode 100644 index b2a3714e7..000000000 --- a/linode/objects/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .base import Base, Property -from .dbase import DerivedBase -from .region import Region -from .filtering import and_, or_ -from .linode import * -from .domain import * -from .account import * -from .networking import * -from .nodebalancer import * -from .support import * diff --git a/linode/objects/account/__init__.py b/linode/objects/account/__init__.py deleted file mode 100644 index 7e174b7ee..000000000 --- a/linode/objects/account/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .user import User -from .event import Event -from .profile import Profile -from .oauth_client import OAuthClient -from .oauth_token import OAuthToken -from .settings import AccountSettings -from .user_grant import UserGrants diff --git a/linode/objects/account/event.py b/linode/objects/account/event.py deleted file mode 100644 index 4667c36a0..000000000 --- a/linode/objects/account/event.py +++ /dev/null @@ -1,58 +0,0 @@ 
-from .. import Base, Property -from .. import Linode, StackScript, Domain -from linode.objects.nodebalancer.nodebalancer import NodeBalancer -from linode.objects.support.ticket import SupportTicket - -from random import choice - -class Event(Base): - api_name = 'events' - api_endpoint = '/account/events/{id}' - properties = { - 'id': Property(identifier=True), - 'percent_complete': Property(volatile=True), - 'created': Property(is_datetime=True, filterable=True), - 'updated': Property(is_datetime=True, filterable=True), - 'seen': Property(), - 'read': Property(), - 'action': Property(), - 'user_id': Property(), - 'username': Property(), - 'entity': Property(), - 'time_remaining': Property(), - 'rate': Property(), - 'status': Property(), - } - - @property - def linode(self): - if self.entity and self.entity.type == 'linode': - return Linode(self._client, self.entity.id) - return None - - @property - def stackscript(self): - if self.entity and self.entity.type == 'stackscript': - return Stackscript(self._client, self.entity.id) - return None - - @property - def domain(self): - if self.entity and self.entity.type == 'domain': - return Domain(self._client, self.entity.id) - return None - - @property - def nodebalancer(self): - if self.entity and self.entity.type == 'nodebalancer': - return NodeBalancer(self._client, self.entity.id) - return None - - @property - def ticket(self): - if self.entity and self.entity.type == 'ticket': - return SupportTicket(self._client, self.entity.id) - return None - - def mark_read(self): - self._client.post('{}/read'.format(Event.api_endpoint), model=self) diff --git a/linode/objects/account/oauth_client.py b/linode/objects/account/oauth_client.py deleted file mode 100644 index f955b795d..000000000 --- a/linode/objects/account/oauth_client.py +++ /dev/null @@ -1,81 +0,0 @@ -import os -import requests - -from ...errors import ApiError, UnexpectedResponseError -from linode.objects import Base, Property - -class OAuthClient(Base): - 
api_name = 'clients' - api_endpoint = "/account/clients/{id}" - - properties = { - "id": Property(identifier=True), - "name": Property(mutable=True, filterable=True), - "secret": Property(), - "redirect_uri": Property(mutable=True), - "status": Property(), - } - - def reset_secret(self): - """ - Resets the client secret for this client. - """ - result = self._client.post("{}/reset_secret".format(OAuthClient.api_endpoint), model=self) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when resetting secret!', json=result) - - self._populate(result) - return self.secret - - def get_thumbnail(self, dump_to=None): - """ - This returns binary data that represents a 128x128 image. - If dump_to is given, attempts to write the image to a file - at the given location. - """ - headers = { - "Authorization": "token {}".format(self._client.token) - } - - result = requests.get('{}/{}/thumbnail'.format(self._client.base_url, - OAuthClient.api_endpoint.format(id=self.id)), - headers=headers) - - if not result.status_code == 200: - return False # TODO - handle this better? - - if dump_to: - with open(dump_to, 'wb+') as f: - f.write(result.content) - else: - return result.content - - def set_thumbnail(self, thumbnail): - """ - Sets the thumbnail for this OAuth Client. If thumbnail is bytes, - uploads it as a png. Otherwise, assumes thumbnail is a path to the - thumbnail and reads it in as bytes before uploading. 
- """ - headers = { - "Authorization": "token {}".format(self._client.token), - "Content-type": "image/png", - } - - # TODO this check needs to be smarter - python2 doesn't do it right - if not isinstance(thumbnail, bytes): - with open(thumbnail, 'rb') as f: - thumbnail = f.read() - - result = requests.put('{}/{}/thumbnail'.format(self._client.base_url, - OAuthClient.api_endpoint.format(id=self.id)), - headers=headers, data=thumbnail) - - if not result.status_code == 200: - errors = [] - j = result.json() - if 'errors' in j: - errors = [ e['reason'] for e in j['errors'] ] - raise ApiError('{}: {}'.format(result.status_code, errors), json=j) - - return True diff --git a/linode/objects/account/oauth_token.py b/linode/objects/account/oauth_token.py deleted file mode 100644 index d3cba360a..000000000 --- a/linode/objects/account/oauth_token.py +++ /dev/null @@ -1,17 +0,0 @@ -from linode.objects import Base, Property -from linode.objects.account import OAuthClient - -class OAuthToken(Base): - api_name = 'tokens' - api_endpoint = "/account/tokens/{id}" - - properties = { - "id": Property(identifier=True), - "client": Property(relationship=OAuthClient), - "type": Property(), - "scopes": Property(), - "label": Property(mutable=True), - "created": Property(is_datetime=True), - "token": Property(), - "expiry": Property(is_datetime=True), - } diff --git a/linode/objects/account/profile.py b/linode/objects/account/profile.py deleted file mode 100644 index d22b8dd92..000000000 --- a/linode/objects/account/profile.py +++ /dev/null @@ -1,67 +0,0 @@ -from ...errors import UnexpectedResponseError -from linode.objects import Base, Property - -class Profile(Base): - api_name = 'profile' - api_endpoint = "/account/profile" - id_attribute = 'username' - - properties = { - 'username': Property(identifier=True), - 'email': Property(mutable=True), - 'timezone': Property(mutable=True), - 'email_notifications': Property(mutable=True), - 'referrals': Property(), - 'ip_whitelist_enabled': 
Property(mutable=True), - 'lish_auth_method': Property(mutable=True), - 'authorized_keys': Property(mutable=True), - 'two_factor_auth': Property(), - 'restricted': Property(), - } - - def reset_password(self, password): - """ - Resets the password of the token's user. - """ - result = self._client.post('/account/profile/password', data={ "password": password }) - - return result - - def enable_tfa(self): - """ - Enables TFA for the token's user. This requies a follow-up request - to confirm TFA. Returns the TFA secret that needs to be confirmed. - """ - result = self._client.post('/account/profile/tfa-enable') - - return result['secret'] - - def confirm_tfa(self, code): - """ - Confirms TFA for an account. Needs a TFA code generated by enable_tfa - """ - result = self._client.post('/account/profile/tfa-enable-confirm', data={ - "tfa_code": code - }) - - return True - - def disable_tfa(self): - """ - Turns off TFA for this user's account. - """ - result = self._client.post('/account/profile/tfa-disable') - - return True - - @property - def grants(self): - """ - Returns grants for the current user - """ - from linode.objects.account import UserGrants - resp = self._client.get(UserGrants.api_endpoint.format(username=self.username)) - - grants = UserGrants(self._client, self.username) - grants._populate(resp) - return grants diff --git a/linode/objects/account/settings.py b/linode/objects/account/settings.py deleted file mode 100644 index 5077aeef0..000000000 --- a/linode/objects/account/settings.py +++ /dev/null @@ -1,23 +0,0 @@ -from linode.objects import Base, Property - -class AccountSettings(Base): - api_name = 'settings' # should never come up - api_endpoint = "/account/settings" - id_attribute = 'email' - - properties = { - "company": Property(mutable=True), - "country": Property(mutable=True), - "balance": Property(), - "address_1": Property(mutable=True), - "network_helper": Property(mutable=True), - "last_name": Property(mutable=True), - "city": 
Property(mutable=True), - "state": Property(mutable=True), - "first_name": Property(mutable=True), - "phone": Property(mutable=True), - "managed": Property(), - "email": Property(mutable=True), - "zip": Property(mutable=True), - "address_2": Property(mutable=True), - } diff --git a/linode/objects/account/user.py b/linode/objects/account/user.py deleted file mode 100644 index ca632a6aa..000000000 --- a/linode/objects/account/user.py +++ /dev/null @@ -1,38 +0,0 @@ -from linode.objects import Base, Property - -class User(Base): - api_name = 'users' - api_endpoint = "/account/users/{id}" - id_attribute = 'username' - - properties = { - 'email': Property(mutable=True), - 'username': Property(identifier=True, mutable=True), - 'restricted': Property(mutable=True), - } - - @property - def grants(self): - from linode.objects.account import UserGrants - if not hasattr(self, '_grants'): - resp = self._client.get(UserGrants.api_endpoint.format(username=self.username)) - - grants = UserGrants(self._client, self.username) - grants._populate(resp) - self._set('_grants', grants) - - return self._grants - - def invalidate(self): - if hasattr(self, '_grants'): - del self._grants - Base.invalidate(self) - - def change_password(self, password): - """ - Sets this user's password - """ - result = self._client.post('{}/password'.format(User.api_endpoint), - model=self, data={ "password": password }) - - return True diff --git a/linode/objects/account/user_grant.py b/linode/objects/account/user_grant.py deleted file mode 100644 index 31ab797de..000000000 --- a/linode/objects/account/user_grant.py +++ /dev/null @@ -1,72 +0,0 @@ -from linode.objects import Base, DerivedBase, Linode, Domain, StackScript - -normal_grants = ('all','access','delete') -stackscript_grants = ('all','use','edit','delete') -linode_grants = ('all','access','delete','resize') - -obj_grants = ( ('linode', Linode), ('dnszone', Domain), ('stackscript', StackScript) )#, ('nodebalancer', NodeBalancer) ) - -class Grant: - 
def __init__(self, client, cls, dct): - self._client = client - self.cls = cls - self.id = dct['id'] - self.label = dct['label'] - self.grants = normal_grants - if cls is Linode: - self.grants = linode_grants - elif cls is StackScript: - self.grants = stackscript_grants - - for g in self.grants: - setattr(self, g, dct[g]) - - @property - def entity(self): - # there are no grants for derived types, so this shouldn't happen - if not issubclass(self.cls, Base) or issubclass(self.cls, DerivedBase): - raise ValueError("Cannot get entity for non-base-class {}".format(self.cls)) - return self.cls(self._client, self.id) - - def _serialize(self): - """ - Returns this grant in a PUT-able form - """ - ret = { g: getattr(self, g) for g in self.grants } - ret['id'] = self.id - return ret - -class UserGrants: - api_endpoint = "/account/users/{username}/grants" - parent_id_name = 'username' - - def __init__(self, client, username): - self._client = client - self.username = username - - def _populate(self, json): - self.global_grants = type('global_grants', (object,), json['global']) - self.customer = type('customer_grants', (object,), json['customer']) - - for key, cls in obj_grants: - lst = [] - for gdct in json[key]: - lst.append(Grant(self._client, cls, gdct)) - setattr(self, key, lst) - - def save(self): - req = { - 'global': { k: v for k,v in vars(self.global_grants).items() if not k.startswith('_') }, - 'customer': { k: v for k,v in vars(self.customer).items() if not k.startswith('_') }, - } - - for key, _ in obj_grants: - lst = [] - for cg in getattr(self, key): - lst.append(cg._serialize()) - req[key] = lst - print(req) - - result = self._client.put(UserGrants.api_endpoint.format(username=self.username), data=req) - - self._populate(result) diff --git a/linode/objects/base.py b/linode/objects/base.py deleted file mode 100644 index a914f0c7c..000000000 --- a/linode/objects/base.py +++ /dev/null @@ -1,173 +0,0 @@ -from .. 
import mappings -from .filtering import FilterableMetaclass - -from future.utils import with_metaclass -from datetime import datetime, timedelta -import time - -# The interval to reload volatile properties -volatile_refresh_timeout = timedelta(seconds=15) - -class Property: - def __init__(self, mutable=False, identifier=False, volatile=False, relationship=None, \ - derived_class=None, is_datetime=False, filterable=False): - self.mutable = mutable - self.identifier = identifier - self.volatile = volatile - self.relationship = relationship - self.derived_class = derived_class - self.is_datetime = is_datetime - self.filterable = filterable - -class MappedObject: - def __init__(self, **vals): - self._expand_vals(self.__dict__, **vals) - - def _expand_vals(self, target, **vals): - for v in vals: - if type(vals[v]) is dict: - vals[v] = MappedObject(**vals[v]) - elif type(vals[v]) is list: - # oh mama - vals[v] = [ MappedObject(**i) if type(i) is dict else i for i in vals[v] ] - target.update(vals) - - def __repr__(self): - return "Mapping containing {}".format(vars(self).keys()) - -class Base(object, with_metaclass(FilterableMetaclass)): - """ - The Base class knows how to look up api properties of a model, and lazy-load them. 
- """ - properties = {} - - def __init__(self, client, id): - self._set('_populated', False) - self._set('_last_updated', datetime.min) - self._set('_client', client) - - for prop in type(self).properties: - self._set(prop, None) - - self._set('id', id) - if hasattr(type(self), 'id_attribute'): - self._set(getattr(type(self), 'id_attribute'), id) - - def __getattribute__(self, name): - if name in type(self).properties.keys(): - if type(self).properties[name].identifier: - pass # don't load identifiers from the server, we have those - elif (object.__getattribute__(self, name) is None and not self._populated \ - or type(self).properties[name].derived_class) \ - or (type(self).properties[name].volatile \ - and object.__getattribute__(self, '_last_updated') - + volatile_refresh_timeout < datetime.now()): - if type(self).properties[name].derived_class: - #load derived object(s) - self._set(name, type(self).properties[name].derived_class - ._api_get_derived(self, getattr(self, '_client'))) - else: - self._api_get() - - return object.__getattribute__(self, name) - - def __repr__(self): - return "{}: {}".format(type(self).__name__, self.id) - - def __setattr__(self, name, value): - if name in type(self).properties.keys() and not type(self).properties[name].mutable: - raise AttributeError("'{}' is not a mutable field of '{}'" - .format(name, type(self).__name__)) - self._set(name, value) - - def save(self): - resp = self._client.put(type(self).api_endpoint, model=self, - data=self._serialize()) - - if 'error' in resp: - return False - return True - - def delete(self): - resp = self._client.delete(type(self).api_endpoint, model=self) - - if 'error' in resp: - return False - self.invalidate() - return True - - def invalidate(self): - for key in (k for k in type(self).properties.keys() - if not type(self).properties[k].identifier): - self._set(key, None) - - self._populated = False - - def _serialize(self): - result = { a: getattr(self, a) for a in type(self).properties - if 
type(self).properties[a].mutable } - - for k, v in result.items(): - if isinstance(v, Base): - result[k] = v.id - - return result - - def _api_get(self): - json = self._client.get(type(self).api_endpoint, model=self) - self._populate(json) - - def _populate(self, json): - if not json: - return - - for key in json: - if key in (k for k in type(self).properties.keys() - if not type(self).properties[k].identifier): - if type(self).properties[key].relationship \ - and not json[key] is None: - if isinstance(json[key], list): - objs = [] - for d in json[key]: - if not 'id' in d: - continue - obj = mappings.make(d['id'], getattr(self,'_client'), - cls=type(self).properties[key].relationship) - if obj: - obj._populate(d) - objs.append(obj) - self._set(key, objs) - else: - if isinstance(json[key], dict): - related_id = json[key]['id'] - else: - related_id = json[key] - obj = mappings.make(related_id, getattr(self,'_client'), - cls=type(self).properties[key].relationship) - if obj and isinstance(json[key], dict): - obj._populate(json[key]) - self._set(key, obj) - elif type(json[key]) is dict: - self._set(key, MappedObject(**json[key])) - elif type(json[key]) is list: - mapping = MappedObject(_list=json[key]) - self._set(key, mapping._list) - elif type(self).properties[key].is_datetime: - try: - t = time.strptime(json[key], "%Y-%m-%dT%H:%M:%S") - self._set(key, datetime.fromtimestamp(time.mktime(t))) - except: - #TODO - handle this better (or log it?) - self._set(key, json[key]) - else: - self._set(key, json[key]) - - self._set('_populated', True) - self._set('_last_updated', datetime.now()) - - def _set(self, name, value): - object.__setattr__(self, name, value) - - @classmethod - def api_list(cls): - return '/'.join(cls.api_endpoint.split('/')[:-1]) diff --git a/linode/objects/dbase.py b/linode/objects/dbase.py deleted file mode 100644 index e0a05fa41..000000000 --- a/linode/objects/dbase.py +++ /dev/null @@ -1,22 +0,0 @@ -from .base import Base -from .. 
import errors - -class DerivedBase(Base): - """ - The DerivedBase class holds information about an object who belongs to another object - (for example, a disk belongs to a linode). These objects have their own endpoints, - but they are below another object in the hierarchy (i.e. /linodes/lnde_123/disks/disk_123) - """ - derived_url_path = '' #override in child classes - parent_id_name = 'parent_id' #override in child classes - - def __init__(self, client, id, parent_id): - Base.__init__(self, client, id) - - self._set(type(self).parent_id_name, parent_id) - - @classmethod - def _api_get_derived(cls, parent, client): - base_url = "{}/{}".format(type(parent).api_endpoint, cls.derived_url_path) - - return client._get_objects(base_url, cls, model=parent, parent_id=parent.id) diff --git a/linode/objects/domain/__init__.py b/linode/objects/domain/__init__.py deleted file mode 100644 index 73b686457..000000000 --- a/linode/objects/domain/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .domain import Domain -from .record import DomainRecord diff --git a/linode/objects/domain/domain.py b/linode/objects/domain/domain.py deleted file mode 100644 index 388d0cdde..000000000 --- a/linode/objects/domain/domain.py +++ /dev/null @@ -1,40 +0,0 @@ -from ...errors import UnexpectedResponseError -from linode.objects import Base, Property -from .record import DomainRecord - -class Domain(Base): - api_name = 'domains' - api_endpoint = "/domains/{id}" - properties = { - 'id': Property(identifier=True), - 'domain': Property(mutable=True, filterable=True), - 'group': Property(mutable=True, filterable=True), - 'description': Property(mutable=True), - 'status': Property(mutable=True), - 'soa_email': Property(mutable=True), - 'retry_sec': Property(mutable=True), - 'master_ips': Property(mutable=True, filterable=True), - 'axfr_ips': Property(mutable=True), - 'expire_sec': Property(mutable=True), - 'refresh_sec': Property(mutable=True), - 'ttl_sec': Property(mutable=True), - 'records': 
Property(derived_class=DomainRecord), - 'type': Property(mutable=True), - } - - def create_record(self, record_type, **kwargs): - - params = { - "type": record_type, - } - params.update(kwargs) - - result = self._client.post("{}/records".format(Domain.api_endpoint), model=self, data=params) - self.invalidate() - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response creating domain record!', json=result) - - zr = DomainRecord(self._client, result['id'], self.id) - zr._populate(result) - return zr diff --git a/linode/objects/domain/record.py b/linode/objects/domain/record.py deleted file mode 100644 index b9c3169e2..000000000 --- a/linode/objects/domain/record.py +++ /dev/null @@ -1,22 +0,0 @@ -from linode.objects import DerivedBase -from linode.objects import Property - -class DomainRecord(DerivedBase): - api_name = "records" - api_endpoint = "/doamins/{domain_id}/records/{id}" - derived_url_path = "records" - parent_id_name = "domain_id" - - properties = { - 'id': Property(identifier=True), - 'domain_id': Property(identifier=True), - 'type': Property(), - 'name': Property(mutable=True, filterable=True), - 'target': Property(mutable=True, filterable=True), - 'priority': Property(mutable=True), - 'weight': Property(mutable=True), - 'port': Property(mutable=True), - 'service': Property(mutable=True), - 'protocol': Property(mutable=True), - 'ttl_sec': Property(mutable=True), - } diff --git a/linode/objects/filtering.py b/linode/objects/filtering.py deleted file mode 100644 index 88b3bd517..000000000 --- a/linode/objects/filtering.py +++ /dev/null @@ -1,122 +0,0 @@ -def or_(a, b): - if not isinstance(a, Filter) or not isinstance(b, Filter): - raise TypeError - return a.__or__(b) - -def and_(a, b): - return a.__and__(b) - -class Filter: - def __init__(self, dct): - self.dct = dct - - def __or__(self, other): - if not isinstance(other, Filter): - raise TypeError("You can only or Filter types!") - if '+or' in self.dct: - return Filter({ '+or': 
self.dct['+or'] + [ other.dct ] }) - else: - return Filter({ '+or': [self.dct, other.dct] }) - - def __and__(self, other): - if not isinstance(other, Filter): - raise TypeError("You can only and Filter types!") - if '+and' in self.dct: - return Filter({ '+and': self.dct['+and'] + [ other.dct ] }) - else: - return Filter({ '+and': [self.dct, other.dct] }) - - def order_by(self, field, desc=False): - # we can't include two order_bys - if '+order_by' in self.dct: - raise AssertionError("You may only order by once!") - - if not isinstance(field, FilterableAttribute): - raise TypeError("Can only order by filterable attributes!") - - self.dct['+order_by'] = field.name - if desc: - self.dct['+order'] = 'desc' - - return self - - def limit(self, limit): - # we can't limit twice - if '+limit' in self.dct: - raise AssertionError("You may only limit once!") - - if not type(limit) == int: - raise TypeError("Limit must be an int!") - - self.dct['+limit'] = limit - - return self - -class FilterableAttribute: - def __init__(self, name): - self.name = name - - def __eq__(self, other): - return Filter({ self.name: other }) - - def __ne__(self, other): - return Filter({ self.name: { "+ne": other } }) - - # "in" evaluates the return value - have to use - # type.contains instead - def contains(self, other): - return Filter({ self.name: { "+contains": other } }) - - def __gt__(self, other): - return Filter({ self.name: { "+gt": other } }) - - def __lt__(self, other): - return Filter({ self.name: { "+lt": other } }) - - def __ge__(self, other): - return Filter({ self.name: { "+gte": other } }) - - def __le__(self, other): - return Filter({ self.name: { "+lte": other } }) - -class NonFilterableAttribute: - """This class is used to provide better error messages should a user attempt - to filter an object on an attribute that is defined in properties, but is - not filterable (otherwise they'd see "{} has no attribute {}" which is less - obvious - """ - def __init__(self, clsname, atrname): 
- self.clsname = clsname - self.atrname = atrname - - def __eq__(self, other): - raise AttributeError("{} cannot be filtered by {}".format(self.clsname, self.atrname)) - - def __ne__(self, other): - raise AttributeError("{} cannot be filtered by {}".format(self.clsname, self.atrname)) - - def contains(self, other): - raise AttributeError("{} cannot be filtered by {}".format(self.clsname, self.atrname)) - - def __gt__(self, other): - raise AttributeError("{} cannot be filtered by {}".format(self.clsname, self.atrname)) - - def __lt__(self, other): - raise AttributeError("{} cannot be filtered by {}".format(self.clsname, self.atrname)) - - def __ge__(self, other): - raise AttributeError("{} cannot be filtered by {}".format(self.clsname, self.atrname)) - - def __le__(self, other): - raise AttributeError("{} cannot be filtered by {}".format(self.clsname, self.atrname)) - -class FilterableMetaclass(type): - def __init__(cls, name, bases, dct): - if hasattr(cls, 'properties'): - for key in cls.properties.keys(): - #if cls.properties[key].filterable: - setattr(cls, key, FilterableAttribute(key)) - #else: - # setattr(cls, key, NonFilterableAttribute(cls.__name__, key)) - - super(FilterableMetaclass, cls).__init__(name, bases, dct) diff --git a/linode/objects/linode/__init__.py b/linode/objects/linode/__init__.py deleted file mode 100644 index 9a616b95c..000000000 --- a/linode/objects/linode/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .linode import Linode -from .kernel import Kernel -from .disk import Disk -from .stackscript import StackScript -from .config import Config -from .distribution import Distribution -from .backup import Backup -from .service import Service diff --git a/linode/objects/linode/backup.py b/linode/objects/linode/backup.py deleted file mode 100644 index 60aa28f16..000000000 --- a/linode/objects/linode/backup.py +++ /dev/null @@ -1,32 +0,0 @@ -from .. 
import DerivedBase, Property, Base - -class Backup(DerivedBase): - api_name = 'backups' - api_endpoint = '/linode/instances/{linode_id}/backups/{id}' - derived_url_path = 'backups' - parent_id_name='linode_id' - - properties = { - 'id': Property(identifier=True), - 'created': Property(is_datetime=True), - 'duration': Property(), - 'finished': Property(is_datetime=True), - 'message': Property(), - 'status': Property(volatile=True), - 'type': Property(), - 'linode_id': Property(identifier=True), - 'label': Property(), - 'configs': Property(), - 'disks': Property(), - 'availability': Property(), - } - - def restore_to(self, linode, **kwargs): - d = { - "linode": linode.id if issubclass(type(linode), Base) else linode, - } - d.update(kwargs) - - result = self._client.post("{}/restore".format(Backup.api_endpoint), model=self, - data=d) - return True diff --git a/linode/objects/linode/config.py b/linode/objects/linode/config.py deleted file mode 100644 index 544124cdc..000000000 --- a/linode/objects/linode/config.py +++ /dev/null @@ -1,42 +0,0 @@ -from .. 
import DerivedBase, Property -from .kernel import Kernel -from .disk import Disk - -class Config(DerivedBase): - api_name="configs" - api_endpoint="/linode/instances/{linode_id}/configs/{id}" - derived_url_path="configs" - parent_id_name="linode_id" - - properties = { - "id": Property(identifier=True), - "linode_id": Property(identifier=True), - "helpers": Property(),#TODO: mutable=True), - "created": Property(is_datetime=True), - "root_device": Property(mutable=True), - "kernel": Property(relationship=Kernel, mutable=True, filterable=True), - "disks": Property(filterable=True),#TODO: mutable=True), - "initrd": Property(relationship=Disk), - "updated": Property(), - "comments": Property(mutable=True, filterable=True), - "label": Property(mutable=True, filterable=True), - "devtmpfs_automount": Property(mutable=True, filterable=True), - "root_device_ro": Property(mutable=True, filterable=True), - "run_level": Property(mutable=True, filterable=True), - "virt_mode": Property(mutable=True, filterable=True), - "ram_limit": Property(mutable=True, filterable=True), - } - - def _populate(self, json): - """ - Override popupate to map the disks more nicely - """ - DerivedBase._populate(self, json) - - import linode.mappings as mapper - - for key in vars(self.disks): - if self.disks.__getattribute__(key): - self.disks.__setattr__(key, - mapper.make(self.disks.__getattribute__(key), - self._client, parent_id=self.linode_id, cls=Disk)) diff --git a/linode/objects/linode/disk.py b/linode/objects/linode/disk.py deleted file mode 100644 index 137d71f86..000000000 --- a/linode/objects/linode/disk.py +++ /dev/null @@ -1,51 +0,0 @@ -from ...errors import UnexpectedResponseError -from .. 
import DerivedBase, Property - -class Disk(DerivedBase): - api_name = 'disks' - api_endpoint = '/linode/instances/{linode_id}/disks/{id}' - derived_url_path = 'disks' - parent_id_name='linode_id' - - properties = { - 'id': Property(identifier=True), - 'created': Property(is_datetime=True), - 'label': Property(mutable=True, filterable=True), - 'size': Property(filterable=True), - 'state': Property(filterable=True), - 'filesystem': Property(), - 'updated': Property(is_datetime=True), - 'linode_id': Property(identifier=True), - } - - - def duplicate(self): - result = self._client.post(Disk.api_endpoint, model=self, data={}) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response duplicating disk!', json=result) - - d = Disk(self._client, result['id'], self.linode_id) - d._populate(result) - return d - - - def reset_root_password(self, root_password=None): - rpass = root_password - if not rpass: - from linode.objects.linode import Linode - rpass = Linode.generate_root_password() - - params = { - 'password': rpass, - } - - result = self._client.post(Disk.api_endpoint, model=self, data=params) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response duplicating disk!', json=result) - - self._populate(result) - if not root_password: - return True, rpass - return True diff --git a/linode/objects/linode/distribution.py b/linode/objects/linode/distribution.py deleted file mode 100644 index ed2c1d45f..000000000 --- a/linode/objects/linode/distribution.py +++ /dev/null @@ -1,14 +0,0 @@ -from .. 
import Base, Property - -class Distribution(Base): - api_name = 'distributions' - api_endpoint = '/linode/distributions/{id}' - properties = { - 'id': Property(identifier=True), - 'label': Property(filterable=True), - 'minimum_storage_size': Property(filterable=True), - 'deprecated': Property(filterable=True), - 'vendor': Property(filterable=True), - 'created': Property(is_datetime=True), - 'x64': Property(), - } diff --git a/linode/objects/linode/kernel.py b/linode/objects/linode/kernel.py deleted file mode 100644 index f4a06f267..000000000 --- a/linode/objects/linode/kernel.py +++ /dev/null @@ -1,17 +0,0 @@ -from .. import Base, Property - -class Kernel(Base): - api_name = 'kernels' - api_endpoint="/linode/kernels/{id}" - properties = { - "created": Property(is_datetime=True), - "deprecated": Property(filterable=True), - "description": Property(), - "id": Property(identifier=True), - "kvm": Property(filterable=True), - "label": Property(filterable=True), - "updates": Property(), - "version": Property(filterable=True), - "x64": Property(filterable=True), - "xen": Property(filterable=True), - } diff --git a/linode/objects/linode/linode.py b/linode/objects/linode/linode.py deleted file mode 100644 index 6fbd149f5..000000000 --- a/linode/objects/linode/linode.py +++ /dev/null @@ -1,394 +0,0 @@ -import string - -from ...errors import UnexpectedResponseError -from .. import Base, Property -from ..base import MappedObject -from .disk import Disk -from .config import Config -from .backup import Backup -from .service import Service -from .. 
import Region -from .distribution import Distribution -from ..networking import IPAddress -from ..networking import IPv6Address -from ..networking import IPv6Pool - -from random import choice - -class Linode(Base): - api_name = 'linodes' - api_endpoint = '/linode/instances/{id}' - properties = { - 'id': Property(identifier=True), - 'label': Property(mutable=True, filterable=True), - 'group': Property(mutable=True, filterable=True), - 'status': Property(volatile=True), - 'created': Property(is_datetime=True), - 'updated': Property(volatile=True, is_datetime=True), - 'total_transfer': Property(), - 'region': Property(relationship=Region, filterable=True), - 'alerts': Property(), - 'distribution': Property(relationship=Distribution, filterable=True), - 'disks': Property(derived_class=Disk), - 'configs': Property(derived_class=Config), - 'type': Property(relationship=Service), - 'backups': Property(), - 'ipv4': Property(), - 'ipv6': Property(), - 'hypervisor': Property(), - } - - @property - def ips(self): - """ - The ips related collection is not normalized like the others, so we have to - make an ad-hoc object to return for its response - """ - if not hasattr(self, '_ips'): - result = self._client.get("{}/ips".format(Linode.api_endpoint), model=self) - - if not "ipv4" in result: - raise UnexpectedResponseError('Unexpected response loading IPs', json=result) - - v4pub = [] - for c in result['ipv4']['public']: - i = IPAddress(self._client, c['address']) - i._populate(c) - v4pub.append(i) - - v4pri = [] - for c in result['ipv4']['private']: - i = IPAddress(self._client, c['address']) - i._populate(c) - v4pri.append(i) - - shared_ips = [] - for c in result['ipv4']['shared']: - i = IPAddress(self._client, c['address']) - i._populate(c) - shared_ips.append(i) - - v6 = [] - for c in result['ipv6']['addresses']: - i = IPv6Address(self._client, c['address']) - i._populate(c) - addresses.append(i) - - slaac = IPv6Pool(self._client, result['ipv6']['slaac']) - link_local = 
IPv6Pool(self._client, result['ipv6']['link_local']) - - pools = [] - for p in result['ipv6']['global']: - pools.append(IPv6Pool(self._client, p['range'])) - - ips = MappedObject(**{ - "ipv4": { - "public": v4pub, - "private": v4pri, - "shared": shared_ips, - }, - "ipv6": { - "slaac": slaac, - "link_local": link_local, - "pools": pools, - "addresses": v6, - }, - }) - - self._set('_ips', ips) - - return self._ips - - @property - def available_backups(self): - """ - The backups response contains what backups are available to be restored. - """ - if not hasattr(self, '_avail_backups'): - result = self._client.get("{}/backups".format(Linode.api_endpoint), model=self) - - if not 'daily' in result: - raise UnexpectedResponseErorr('Unexpected response loading available backups!', - json=result) - - daily = None - if result['daily']: - daily = Backup(self._client, result['daily']['id'], self.id) - daily._populate(result['daily']) - - weekly = [] - for w in result['weekly']: - cur = Backup(self._client, w['id'], self.id) - cur._populate(w) - weekly.append(w) - - snap = None - if result['snapshot']['current']: - snap = Backup(self._client, result['snapshot']['current']['id'], self.id) - snap._populate(result['snapshot']['current']) - - psnap = None - if result['snapshot']['in_progress']: - psnap = Backup(self._client, result['snapshot']['in_progress']['id'], self.id) - psnap._populate(result['snapshot']['in_progress']) - - self._set('_avail_backups', MappedObject(**{ - "daily": daily, - "weekly": weekly, - "snapshot": { - "current": snap, - "in_progress": psnap, - } - })) - - return self._avail_backups - - def _populate(self, json): - # fixes ipv4 and ipv6 attribute of json to make base._populate work - if 'ipv4' in json and 'address' in json['ipv4']: - json['ipv4']['id'] = json['ipv4']['address'] - if 'ipv6' in json and isinstance(json['ipv6'], list): - for j in json['ipv6']: - j['id'] = j['range'] - - Base._populate(self, json) - - def invalidate(self): - """ Clear out 
cached properties """ - if hasattr(self, '_avail_backups'): - del self._avail_backups - if hasattr(self, '_ips'): - del self._ips - - Base.invalidate(self) - - def boot(self, config=None): - resp = self._client.post("{}/boot".format(Linode.api_endpoint), model=self, data={'config': config.id} if config else None) - - if 'error' in resp: - return False - return True - - def shutdown(self): - resp = self._client.post("{}/shutdown".format(Linode.api_endpoint), model=self) - - if 'error' in resp: - return False - return True - - def reboot(self): - resp = self._client.post("{}/reboot".format(Linode.api_endpoint), model=self) - - if 'error' in resp: - return False - return True - - @staticmethod - def generate_root_password(): - return ''.join([choice('abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*_+-=') for _ in range(0, 32) ]) - - # create derived objects - def create_config(self, kernel, label=None, disks=None, **kwargs): - - disk_map = {} - if disks: - hypervisor_prefix = 'sd' if self.hypervisor == 'kvm' else 'xvd' - for i in range(0,8): - disk_map[hypervisor_prefix + string.ascii_lowercase[i]] = disks[i].id if i < len(disks) else None - - params = { - 'kernel': kernel.id if issubclass(type(kernel), Base) else kernel, - 'label': label if label else "{}_config_{}".format(self.label, len(self.configs)), - 'disks': disk_map, - } - params.update(kwargs) - - result = self._client.post("{}/configs".format(Linode.api_endpoint), model=self, data=params) - self.invalidate() - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response creating config!', json=result) - - c = Config(self._client, result['id'], self.id) - c._populate(result) - return c - - def create_disk(self, size, label=None, filesystem=None, read_only=False, distribution=None, \ - root_pass=None, root_ssh_key=None, stackscript=None, **stackscript_args): - - gen_pass = None - if distribution and not root_pass: - gen_pass = Linode.generate_root_password() - root_pass 
= gen_pass - - if root_ssh_key: - accepted_types = ('ssh-dss', 'ssh-rsa', 'ecdsa-sha2-nistp', 'ssh-ed25519') - if not any([ t for t in accepted_types if root_ssh_key.startswith(t) ]): - # it doesn't appear to be a key.. is it a path to the key? - import os - root_ssh_key = os.path.expanduser(root_ssh_key) - if os.path.isfile(root_ssh_key): - with open(root_ssh_key) as f: - root_ssh_key = "".join([ l.strip() for l in f ]) - else: - raise ValueError("root_ssh_key must either be a path to the key file or a " - "raw public key of one of these types: {}".format(accepted_types)) - - if distribution and not label: - label = "My {} Disk".format(distribution.label) - - params = { - 'size': size, - 'label': label if label else "{}_disk_{}".format(self.label, len(self.disks)), - 'read_only': read_only, - 'filesystem': filesystem if filesystem else 'raw', - } - - if distribution: - params.update({ - 'distribution': distribution.id if issubclass(type(distribution), Base) else distribution, - 'root_pass': root_pass, - }) - - if stackscript: - params['stackscript'] = stackscript.id - if stackscript_args: - params['stackscript_data'] = stackscript_args - - result = self._client.post("{}/disks".format(Linode.api_endpoint), model=self, data=params) - self.invalidate() - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response creating disk!', json=result) - - d = Disk(self._client, result['id'], self.id) - d._populate(result) - - if gen_pass: - return d, gen_pass - return d - - def enable_backups(self): - result = self._client.post("{}/backups/enable".format(Linode.api_endpoint), model=self) - self._populate(result) - return True - - def cancel_backups(self): - result = self._client.post("{}/backups/cancel".format(Linode.api_endpoint), model=self) - self._populate(result) - return True - - def snapshot(self, label=None): - result = self._client.post("{}/backups".format(Linode.api_endpoint), model=self, - data={ "label": label }) - - if not 'id' in result: - 
raise UnexpectedResponseError('Unexpected response taking snapshot!', json=result) - - # so the changes show up the next time they're accessed - if hasattr(self, '_avail_backups'): - del self._avail_backups - - b = Backup(self._client, result['id'], self.id) - b._populate(result) - return b - - def allocate_ip(self, public=False): - result = self._client.post("{}/ips".format(Linode.api_endpoint), model=self, - data={ "type": "public" if public else "private" }) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response allocating IP!', json=result) - - i = IPAddress(self._client, result['id']) - i._populate(result) - return i - - def rebuild(self, distribution, root_pass=None, root_ssh_key=None, **kwargs): - ret_pass = None - if not root_pass: - ret_pass = Linode.generate_root_password() - root_pass = ret_pass - - if root_ssh_key: - accepted_types = ('ssh-dss', 'ssh-rsa', 'ecdsa-sha2-nistp', 'ssh-ed25519') - if not any([ t for t in accepted_types if root_ssh_key.startswith(t) ]): - # it doesn't appear to be a key.. is it a path to the key? 
- import os - root_ssh_key = os.path.expanduser(root_ssh_key) - if os.path.isfile(root_ssh_key): - with open(root_ssh_key) as f: - root_ssh_key = "".join([ l.strip() for l in f ]) - else: - raise ValueError('root_ssh_key must either be a path to the key file or a ' - 'raw public key of one of these types: {}'.format(accepted_types)) - - params = { - 'distribution': distribution.id if issubclass(type(distribution), Base) else distribution, - 'root_pass': root_pass, - } - params.update(kwargs) - - result = self._client.post('{}/rebuild'.format(Linode.api_endpoint), model=self, data=params) - - if not 'disks' in result: - raise UnexpectedResponseError('Unexpected response issuing rebuild!', json=result) - - self.invalidate() - if not ret_pass: - return True - else: - return ret_pass - - def rescue(self, *disks): - if disks: - disks = { x:y for x,y in zip(('sda','sdb'), disks) } - else: - disks=None - - result = self._client.post('{}/rescue'.format(Linode.api_endpoint), model=self, data=disks) - - return result - - def set_shared_ips(self, *ips): - """ - Takes a list of IP Addresses (either objects or strings) and attempts to - set them as the Shared IPs for this Linode - """ - params = [] - for ip in ips: - if isinstance(ip, str): - params.append(ip) - elif isinstance(ip, IPAddress): - params.append(ip.address) - else: - params.append(str(ip)) # and hope that works - - params = { - "ips": params - } - - result = self._client.post('{}/ips/sharing'.format(Linode.api_endpoint), model=self, - data=params) - - # so the changes show up next time they're accessed - if hasattr(self, '_ips'): - del self._ips - - return True - - def kvmify(self): - """ - Converts this linode to KVM from Xen - """ - ret = self._client.post('{}/kvmify'.format(Linode.api_endpoint), model=self) - - return True - - @property - def stats(self): - """ - Returns the JSON stats for this Linode - """ - # TODO - this would be nicer if we formatted the stats - return 
self._client.get('{}/stats'.format(Linode.api_endpoint), model=self) diff --git a/linode/objects/linode/service.py b/linode/objects/linode/service.py deleted file mode 100644 index 7c2e2c292..000000000 --- a/linode/objects/linode/service.py +++ /dev/null @@ -1,17 +0,0 @@ -from .. import Base, Property - -class Service(Base): - api_name = 'types' - api_endpoint = "/linode/types/{id}" - properties = { - 'storage': Property(filterable=True), - 'hourly_price': Property(filterable=True), - 'backups_price': Property(filterable=True), - 'id': Property(identifier=True), - 'label': Property(filterable=True), - 'mbits_out': Property(filterable=True), - 'monthly_price': Property(filterable=True), - 'ram': Property(filterable=True), - 'transfer': Property(filterable=True), - 'vcpus': Property(filterable=True), - } diff --git a/linode/objects/linode/stackscript.py b/linode/objects/linode/stackscript.py deleted file mode 100644 index 9a62e4225..000000000 --- a/linode/objects/linode/stackscript.py +++ /dev/null @@ -1,69 +0,0 @@ -from .. 
import Base, Property -from .distribution import Distribution - -from enum import Enum - -class UserDefinedFieldType(Enum): - text = 1 - select_one = 2 - select_many = 3 - -class UserDefinedField(): - def __init__(self, name, label, example, field_type, choices=None): - self.name = name - self.label = label - self.example = example - self.field_type = field_type - self.choices = choices - - def __repr__(self): - return "{}({}): {}".format(self.label, self.field_type.name, self.example) - -class StackScript(Base): - api_name = 'stackscripts' - api_endpoint = '/linode/stackscripts/{id}' - properties = { - "user_defined_fields": Property(), - "label": Property(mutable=True, filterable=True), - "customer_id": Property(), - "rev_note": Property(mutable=True), - "user_id": Property(), - "is_public": Property(mutable=True, filterable=True), - "created": Property(is_datetime=True), - "deployments_active": Property(), - "script": Property(mutable=True), - "distributions": Property(relationship=Distribution, mutable=True, filterable=True), - "deployments_total": Property(), - "description": Property(mutable=True, filterable=True), - "updated": Property(is_datetime=True), - } - - def _populate(self, json): - """ - Override the populate method to map user_defined_fields to - fancy values - """ - Base._populate(self, json) - - mapped_udfs = [] - for udf in self.user_defined_fields: - t = UserDefinedFieldType.text - choices = None - if hasattr(udf, 'oneof'): - t = UserDefinedFieldType.select_one - choices = udf.oneof.split(',') - elif hasattr(udf, 'manyof'): - t = UserDefinedFieldType.select_many - choices = udf.manyof.split(',') - - mapped_udfs.append(UserDefinedField(udf.name, udf.label, udf.example, t, \ - choices=choices)) - - self._set('user_defined_fields', mapped_udfs) - for d in self.distributions: - d._set("_populated", False) # these come in as partials - - def _serialize(self): - dct = Base._serialize(self) - dct['distributions'] = [ d.id for d in self.distributions ] 
- return dct diff --git a/linode/objects/networking/__init__.py b/linode/objects/networking/__init__.py deleted file mode 100644 index ab5ecba58..000000000 --- a/linode/objects/networking/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .ipaddress import IPAddress -from .ip6address import IPv6Address -from .ip6pool import IPv6Pool diff --git a/linode/objects/networking/ip6address.py b/linode/objects/networking/ip6address.py deleted file mode 100644 index fbc99645a..000000000 --- a/linode/objects/networking/ip6address.py +++ /dev/null @@ -1,18 +0,0 @@ -from .. import Base, Property -from ..region import Region - -class IPv6Address(Base): - api_name = 'ipv6' - api_endpoint = 'networking/ipv6/{address}' - id_attribute = 'address' - - properties = { - "address": Property(identifier=True), - "gateway": Property(), - "range": Property(), - "rdns": Property(mutable=True), - "prefix": Property(), - "subnet_mask": Property(), - "type": Property(), - "region": Property(relationship=Region), - } diff --git a/linode/objects/networking/ip6pool.py b/linode/objects/networking/ip6pool.py deleted file mode 100644 index 33e0381b8..000000000 --- a/linode/objects/networking/ip6pool.py +++ /dev/null @@ -1,11 +0,0 @@ -from .. import Base, Property - -class IPv6Pool(Base): - api_name = 'ipv6_pools' - api_endpoint = '/networking/ipv6/{}' - id_attribute = 'range' - - properties = { - 'range': Property(identifier=True), - 'region': Property(filterable=True), - } diff --git a/linode/objects/networking/ipaddress.py b/linode/objects/networking/ipaddress.py deleted file mode 100644 index 5d2b3156e..000000000 --- a/linode/objects/networking/ipaddress.py +++ /dev/null @@ -1,36 +0,0 @@ -from .. 
import Base, Property -from ..region import Region - -class IPAddress(Base): - api_name = 'ipv4s' - api_endpoint = '/networking/ipv4/{address}' - id_attribute = 'address' - - properties = { - "address": Property(identifier=True), - "gateway": Property(), - "subnet_mask": Property(), - "prefix": Property(), - "type": Property(), - "rdns": Property(mutable=True), - "linode_id": Property(), - "region": Property(relationship=Region, filterable=True), - } - - @property - def linode(self): - from ..linode import Linode - if not hasattr(self, '_linode'): - self._set('_linode', Linode(self._client, self.linode_id)) - return self._linode - - def to(self, linode): - """ - This is a helper method for ip-assign, and should not be used outside - of that context. It's used to cleanly build an IP Assign request with - pretty python syntax. - """ - from ..linode import Linode - if not isinstance(linode, Linode): - raise ValueError("IP Address can only be assigned to a Linode!") - return { "address": self.address, "linode_id": linode.id } diff --git a/linode/objects/nodebalancer/__init__.py b/linode/objects/nodebalancer/__init__.py deleted file mode 100644 index 7bb5039c8..000000000 --- a/linode/objects/nodebalancer/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .nodebalancer import NodeBalancer -from .config import NodeBalancerConfig diff --git a/linode/objects/nodebalancer/config.py b/linode/objects/nodebalancer/config.py deleted file mode 100644 index bc6986392..000000000 --- a/linode/objects/nodebalancer/config.py +++ /dev/null @@ -1,88 +0,0 @@ -import os - -from .. 
import DerivedBase, Property -from ..base import MappedObject -from .node import NodeBalancerNode - -class NodeBalancerConfig(DerivedBase): - api_name = 'configs' - api_endpoint = '/nodebalancers/{nodebalancer_id}/configs/{id}' - derived_url_path = 'configs' - parent_id_name='nodebalancer_id' - - properties = { - 'id': Property(identifier=True), - 'nodebalancer_id': Property(identifier=True), - "label": Property(mutable=True), - "port": Property(mutable=True), - "protocol": Property(mutable=True), - "algorithm": Property(mutable=True), - "stickiness": Property(mutable=True), - "check": Property(mutable=True), - "check_interval": Property(mutable=True), - "check_timeout": Property(mutable=True), - "check_attempts": Property(mutable=True), - "check_path": Property(mutable=True), - "check_body": Property(mutable=True), - "check_passive": Property(mutable=True), - "ssl_cert": Property(mutable=True), - "ssl_key": Property(mutable=True), - "cipher_suite": Property(mutable=True), - "nodes_status": Property(), - } - - @property - def nodes(self): - """ - This is a special derived_class relationship because NodeBalancerNode is the - only api object that requires two parent_ids - """ - if not hasattr(self, '_nodes'): - base_url = "{}/{}".format(NodeBalancerConfig.api_endpoint, NodeBalancerNode.derived_url_path) - result = self._client._get_objects(base_url, NodeBalancerNode, model=self, parent_id=(self.id, self.nodebalancer_id)) - - self._set('_nodes', result) - - return self._nodes - - def create_node(self, label, address, **kwargs): - params = { - "label": label, - "address": address, - } - params.update(kwargs) - - result = self._client.post("{}/nodes".format(NodeBalancerConfig.api_endpoint), model=self, data=params) - self.invalidate() - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response creating node!', json=result) - - n = NodeBalancerNode(self._client, result['id'], self.id, self.nodebalancer_id) # this is three levels deep, so we need a 
special constructor - n._populate(result) - return n - - def enable_ssl(self, cert, key): - """ - Enables SSL on a NodeBalancer Config (port), served using the given cert and unpassphrased - key - """ - params = {} - - params['ssl_cert'] = cert - if not 'BEGIN CERTIFICATE' in cert: - # if it doesn't look like a cert, maybe it's a path? - if os.path.isfile(os.path.expanduser(cert)): - with open(os.path.expanduser(cert)) as f: - params['ssl_cert'] = f.read() - - params['ssl_key'] = key - if not 'PRIVATE KEY' in key: - # if it doesn't look like a key, maybe it's a path? - if os.path.isfile(os.path.expanduser(key)): - with open(os.path.expanduser(key)) as f: - params['ssl_key'] = f.read() - - self._client.post('{}/ssl'.format(NodeBalancerConfig.api_endpoint), model=self, data=params) - - return True diff --git a/linode/objects/nodebalancer/node.py b/linode/objects/nodebalancer/node.py deleted file mode 100644 index ed205ed73..000000000 --- a/linode/objects/nodebalancer/node.py +++ /dev/null @@ -1,36 +0,0 @@ -from .. import DerivedBase, Property -from ..base import MappedObject - -class NodeBalancerNode(DerivedBase): - api_name = 'nodes' - api_endpoint = '/nodebalancers/{nodebalancer_id}/configs/{config_id}/nodes/{id}' - derived_url_path = 'nodes' - parent_id_name='config_id' - - properties = { - 'id': Property(identifier=True), - 'config_id': Property(identifier=True), - 'nodebalancer_id': Property(identifier=True), - "label": Property(mutable=True), - "address": Property(mutable=True), - "weight": Property(mutable=True), - "mode": Property(mutable=True), - "status": Property(), - } - - def __init__(self, client, id, parent_id, nodebalancer_id=None): - """ - We need a special constructor here because this object's parent - has a parent itself. 
- """ - if not nodebalancer_id and not isinstance(parent_id, tuple): - raise ValueError('NodeBalancerNode must either be created with a nodebalancer_id or a tuple of ' - '(config_id, nodebalancer_id) for parent_id!') - - if isinstance(parent_id, tuple): - nodebalancer_id = parent_id[1] - parent_id = parent_id[0] - - DerivedBase.__init__(self, client, id, parent_id) - - self._set('nodebalancer_id', nodebalancer_id) diff --git a/linode/objects/nodebalancer/nodebalancer.py b/linode/objects/nodebalancer/nodebalancer.py deleted file mode 100644 index a88edbc3a..000000000 --- a/linode/objects/nodebalancer/nodebalancer.py +++ /dev/null @@ -1,39 +0,0 @@ -from .. import Base, Property -from ..base import MappedObject -from ..networking.ipaddress import IPAddress -from ..region import Region - -from .config import NodeBalancerConfig - -class NodeBalancer(Base): - api_name = 'nodebalancers' - api_endpoint = '/nodebalancers/{id}' - properties = { - 'id': Property(identifier=True), - 'label': Property(mutable=True), - 'hostname': Property(), - 'client_conn_throttle': Property(mutable=True), - 'status': Property(), - 'created': Property(is_datetime=True), - 'updated': Property(is_datetime=True), - 'ipv4': Property(relationship=IPAddress), - 'ipv6': Property(), - 'region': Property(relationship=Region, filterable=True), - 'configs': Property(derived_class=NodeBalancerConfig), - } - - # create derived objects - def create_config(self, label=None, **kwargs): - params = kwargs - if label: - params['label'] = label - - result = self._client.post("{}/configs".format(NodeBalancer.api_endpoint), model=self, data=params) - self.invalidate() - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response creating config!', json=result) - - c = NodeBalancerConfig(self._client, result['id'], self.id) - c._populate(result) - return c diff --git a/linode/objects/region.py b/linode/objects/region.py deleted file mode 100644 index 03e7e3eac..000000000 --- 
a/linode/objects/region.py +++ /dev/null @@ -1,9 +0,0 @@ -from .base import Base, Property - -class Region(Base): - api_name = 'regions' - api_endpoint = "/regions/{id}" - properties = { - 'id': Property(identifier=True), - 'label': Property(filterable=True), - } diff --git a/linode/objects/support/__init__.py b/linode/objects/support/__init__.py deleted file mode 100644 index dc68578d8..000000000 --- a/linode/objects/support/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .ticket import SupportTicket -from .reply import TicketReply diff --git a/linode/objects/support/reply.py b/linode/objects/support/reply.py deleted file mode 100644 index 3b36f13ed..000000000 --- a/linode/objects/support/reply.py +++ /dev/null @@ -1,16 +0,0 @@ -from .. import DerivedBase, Property - -class TicketReply(DerivedBase): - api_name = 'replies' - api_endpoint = '/support/tickets/{ticket_id}/replies' - derived_url_path = 'replies' - parent_id_name='ticket_id' - - properties = { - 'id': Property(identifier=True), - 'ticket_id': Property(identifier=True), - 'description': Property(), - 'created': Property(is_datetime=True), - 'created_by': Property(), - 'from_linode': Property(), - } diff --git a/linode/objects/support/ticket.py b/linode/objects/support/ticket.py deleted file mode 100644 index 599920d16..000000000 --- a/linode/objects/support/ticket.py +++ /dev/null @@ -1,83 +0,0 @@ -import requests - -from .. import Base, Property -from .. 
import Linode, Domain -from linode.objects.nodebalancer.nodebalancer import NodeBalancer -from ...errors import ApiError -from .reply import TicketReply - -class SupportTicket(Base): - api_name = 'tickets' - api_endpoint = '/support/tickets/{id}' - properties = { - 'id': Property(identifier=True), - 'summary': Property(), - 'description': Property(), - 'status': Property(filterable=True), - 'entity': Property(), - 'opened': Property(is_datetime=True), - 'closed': Property(is_datetime=True), - 'closed_by': Property(), - 'updated': Property(is_datetime=True), - 'updated_by': Property(), - 'replies': Property(derived_class=TicketReply), - } - - @property - def linode(self): - if self.entity and self.entity.type == 'linode': - return Linode(self._client, self.entity.id) - return None - - @property - def domain(self): - if self.entity and self.entity.type == 'domain': - return Domain(self._client, self.entity.id) - return None - - @property - def nodebalancer(self): - if self.entity and self.entity.type == 'nodebalancer': - return NodeBalancer(self._client, self.entity.id) - return None - - def post_reply(self, description): - """ - """ - result = self._client.post("{}/replies".format(SupportTicket.api_endpoint), model=self, data={ - "description": description, - }) - - if not 'id' in result: - raise UnexpectedResponseError('Unexpected response when creating ticket reply!', - json=result) - - r = TicketReply(self._client, result['id'], self.id) - r._populate(result) - return r - - def upload_attachment(self, attachment): - content = None - with open(attachment) as f: - content = f.read() - - if not content: - raise ValueError('Nothing to upload!') - - headers = { - "Authorization": "token {}".format(self._client.token), - "Content-type": "multipart/form-data", - } - - result = requests.post('{}{}/attachments'.format(self._client.base_url, - SupportTicket.api_endpoint.format(id=self.id)), - headers=headers, files=content) - - if not result.status_code == 200: - errors = 
[] - j = result.json() - if 'errors' in j: - errors = [ e['reason'] for e in j['errors'] ] - raise ApiError('{}: {}'.format(result.status_code, errors), json=j) - - return True diff --git a/linode/util/__init__.py b/linode/util/__init__.py deleted file mode 100644 index c38b079ff..000000000 --- a/linode/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from linode.util.paginated_list import PaginatedList diff --git a/linode/util/paginated_list.py b/linode/util/paginated_list.py deleted file mode 100644 index 41cf44eb0..000000000 --- a/linode/util/paginated_list.py +++ /dev/null @@ -1,139 +0,0 @@ -import math - -class PaginatedList(object): - def __init__(self, client, page_endpoint, page=[], max_pages=1, total_items=None, - parent_id=None, key=None): - self.client = client - self.page_endpoint = page_endpoint - self.key = key if key else page_endpoint - self.page_size = len(page) - self.max_pages = max_pages - self.lists = [ None for i in range(0, self.max_pages) ] - self.lists[0] = page - self.list_cls = type(page[0]) if page else None # TODO if this is None that's bad - self.objects_parent_id = parent_id - self.cur = 0 # for being a generator - - self.total_items = total_items - if not total_items: - self.total_items = len(page) - - def first(self): - return self[0] - - def last(self): - return self[-1] - - def only(self): - if len(self) == 1: - return self[0] - raise ValueError("List {} has more than one element!".format(self)) - - def __repr__(self): - return "PaginatedList ({} items)".format(self.total_items) - - def _load_page(self, page_number): - from linode import mappings - - j = self.client.get("/{}?page={}".format(self.page_endpoint, page_number+1)) - - if j['total_pages'] != self.max_pages or j['total_results'] != len(self): - raise RuntimeError('List {} has changed since creation!'.format(self)) - - l = mappings.make_list(j[self.key], self.client, parent_id=self.objects_parent_id, cls=self.list_cls) - self.lists[page_number] = l - - def __getitem__(self, 
index): - # this comes in here now, but we're hadling it elsewhere - if isinstance(index, slice): - return self._get_slice(index) - - # handle negative indexing - if index < 0: - index = len(self) + index - if index < 0: - raise IndexError('list index out of range') - - if index >= self.page_size * self.max_pages: - raise IndexError('list index out of range') - normalized_index = index % self.page_size - target_page = math.ceil((index+1.0)/self.page_size)-1 - target_page = int(target_page) - - if not self.lists[target_page]: - self._load_page(target_page) - - return self.lists[target_page][normalized_index] - - def __len__(self): - return self.total_items - - def _get_slice(self, s): - i = s.start if s.start else 0 - j = s.stop if s.stop else self.total_items - - if not s.step is None and not s.step == 1: - raise NotImplementedError('TODO') - - if i < 0 and j < 0: - i = len(self) + i - j = len(self) + j - - if i < 0 and not s.stop: - i = len(self) + i - - if j < 0 and not s.start: - j = len(self) + j - - if i > j: - raise NotImplementedError('TODO') - - if i < 0 or j < 0: - # TODO - this should probably not raise - raise IndexError('list index out of range') - - if i > self.page_size * self.max_pages: - i = self.page_size * self.max_pages - 1 - - if j > self.page_size * self.max_pages: - j = self.page_size * self.max_pages - 1 - - i_normalized = i % self.page_size - j_normalized = j % self.page_size - i_page = math.ceil((i+1)/self.page_size)-1 - j_page = math.ceil((j+1)/self.page_size)-1 - - if not self.lists[i_page]: - self._load_page(i_page) - if not self.lists[j_page]: - self._load_page(j_page) - - # if we're entirely in one list, this is easy - if i_page == j_page: - return self.lists[i_page][i_normalized:j_normalized] - - ret = self.lists[i_page][i_normalized:] - - for page in range(i_page, j_page): - if not self.lists[page]: - self._load_page(page) - - if page != i_page and page != j_page: - ret += self.lists[page] - - ret += self.lists[j_page][:j_normalized] 
- - return ret - - def __setitem__(self, index, value): - raise AttributeError('Assigning to indicies in paginated lists is not supported') - - def __delitem__(self, index, value): - raise AttributeError('Assigning to indicies in paginated lists is not supported') - - def __next__(self): - if self.cur < len(self): - self.cur += 1 - return self[self.cur-1] - else: - raise StopIteration() diff --git a/linode_api4/__init__.py b/linode_api4/__init__.py new file mode 100644 index 000000000..69fa1111c --- /dev/null +++ b/linode_api4/__init__.py @@ -0,0 +1,7 @@ +# isort: skip_file +from linode_api4.objects import * +from linode_api4.errors import ApiError, UnexpectedResponseError +from linode_api4.linode_client import LinodeClient, MonitorClient +from linode_api4.login_client import LinodeLoginClient, OAuthScopes +from linode_api4.paginated_list import PaginatedList +from linode_api4.polling import EventPoller diff --git a/linode_api4/common.py b/linode_api4/common.py new file mode 100644 index 000000000..ac77d2a05 --- /dev/null +++ b/linode_api4/common.py @@ -0,0 +1,83 @@ +from dataclasses import dataclass +from pathlib import Path + +from linode_api4.objects import JSONObject + +SSH_KEY_TYPES = ( + "ssh-dss", + "ssh-rsa", + "ssh-ed25519", + "ecdsa-sha2-nistp256", + "ecdsa-sha2-nistp384", + "ecdsa-sha2-nistp521", +) + + +def load_and_validate_keys(authorized_keys): + """ + Loads authorized_keys as taken by :any:`instance_create`, + :any:`disk_create` or :any:`rebuild`, and loads in any keys from any files + provided. + + :param authorized_keys: A list of keys or paths to keys, or a single key + + :returns: A list of raw keys + :raises: ValueError if keys in authorized_keys don't appear to be a raw + key and can't be opened. 
+ """ + if not authorized_keys: + return None + + if not isinstance(authorized_keys, list): + authorized_keys = [authorized_keys] + + ret = [] + + for k in authorized_keys: + accepted_types = ( + "ssh-dss", + "ssh-rsa", + "ecdsa-sha2-nistp", + "ssh-ed25519", + ) + if any( + [t for t in accepted_types if k.startswith(t)] + ): # pylint: disable=use-a-generator + # this looks like a key, cool + ret.append(k) + else: + # it doesn't appear to be a key.. is it a path to the key? + k_path = Path(k).expanduser() + if k_path.is_file(): + with open(k_path) as f: + ret.append(f.read().rstrip()) + else: + raise ValueError( + "authorized_keys must either be paths " + "to the key files or a list of raw " + "public key of one of these types: {}".format( + accepted_types + ) + ) + return ret + + +@dataclass +class Price(JSONObject): + """ + Price contains the core fields of a price object returned by various pricing endpoints. + """ + + hourly: int = 0 + monthly: int = 0 + + +@dataclass +class RegionPrice(JSONObject): + """ + RegionPrice contains the core fields of a region_price object returned by various pricing endpoints. + """ + + id: int = 0 + hourly: int = 0 + monthly: int = 0 diff --git a/linode_api4/errors.py b/linode_api4/errors.py new file mode 100644 index 000000000..511ac8c57 --- /dev/null +++ b/linode_api4/errors.py @@ -0,0 +1,149 @@ +# Necessary to maintain compatibility with Python < 3.11 +from __future__ import annotations + +from builtins import super +from json import JSONDecodeError +from typing import Any, Dict, Optional + +from requests import Response + + +class ApiError(RuntimeError): + """ + An API Error is any error returned from the API. These + typically have a status code in the 400s or 500s. Most + often, this will be caused by invalid input to the API. 
+ """ + + def __init__( + self, + message: str, + status: int = 400, + json: Optional[Dict[str, Any]] = None, + response: Optional[Response] = None, + ): + super().__init__(message) + + self.status = status + self.json = json + self.response = response + + self.errors = [] + + if json and "errors" in json and isinstance(json["errors"], list): + self.errors = [e["reason"] for e in json["errors"]] + + @classmethod + def from_response( + cls, + response: Response, + message: Optional[str] = None, + disable_formatting: bool = False, + ) -> Optional[ApiError]: + """ + Creates an ApiError object from the given response, + or None if the response does not contain an error. + + :arg response: The response to create an ApiError from. + :arg message: An optional message to prepend to the error's message. + :arg disable_formatting: If true, the error's message will not automatically be formatted + with details from the API response. + + :returns: The new API error. + """ + + if response.status_code < 400 or response.status_code > 599: + # No error was found + return None + + request = response.request + + try: + response_json = response.json() + except JSONDecodeError: + response_json = None + + # Use the user-defined message is formatting is disabled + if disable_formatting: + return cls( + message, + status=response.status_code, + json=response_json, + response=response, + ) + + # Build the error string + error_fmt = "N/A" + + if response_json is not None and "errors" in response_json: + errors = [] + + for error in response_json["errors"]: + field = error.get("field") + reason = error.get("reason") + errors.append(f"{field + ': ' if field else ''}{reason}") + + error_fmt = "; ".join(errors) + + elif len(response.text or "") > 0: + error_fmt = response.text + + return cls( + ( + f"{message + ': ' if message is not None else ''}" + f"{f'{request.method} {request.path_url}: ' if request else ''}" + f"[{response.status_code}] {error_fmt}" + ), + status=response.status_code, + 
json=response_json, + response=response, + ) + + +class UnexpectedResponseError(RuntimeError): + """ + An Unexpected Response Error occurs when the API returns + something that this library is unable to parse, usually + because it expected something specific and didn't get it. + These typically indicate an oversight in developing this + library, and should be fixed with changes to this codebase. + """ + + def __init__( + self, + message: str, + status: int = 200, + json: Optional[Dict[str, Any]] = None, + response: Optional[Response] = None, + ): + super().__init__(message) + + self.status = status + self.json = json + self.response = response + + @classmethod + def from_response( + cls, + message: str, + response: Response, + ) -> Optional[UnexpectedResponseError]: + """ + Creates an UnexpectedResponseError object from the given response and message. + + :arg message: The message to create this error with. + :arg response: The response to create an UnexpectedResponseError from. + :returns: The new UnexpectedResponseError. 
+ """ + + try: + response_json = response.json() + except JSONDecodeError: + response_json = None + + return cls( + message, + status=response.status_code, + json=response_json, + response=response, + ) diff --git a/linode_api4/groups/__init__.py b/linode_api4/groups/__init__.py new file mode 100644 index 000000000..c835972bc --- /dev/null +++ b/linode_api4/groups/__init__.py @@ -0,0 +1,28 @@ +# Group needs to be imported first +from .group import * # isort: skip + +from .account import * +from .beta import * +from .database import * +from .domain import * +from .image import * +from .image_share_group import * +from .linode import * +from .lke import * +from .lke_tier import * +from .lock import * +from .longview import * +from .maintenance import * +from .monitor import * +from .monitor_api import * +from .networking import * +from .nodebalancer import * +from .object_storage import * +from .placement import * +from .polling import * +from .profile import * +from .region import * +from .support import * +from .tag import * +from .volume import * +from .vpc import * diff --git a/linode_api4/groups/account.py b/linode_api4/groups/account.py new file mode 100644 index 000000000..6f8c6528e --- /dev/null +++ b/linode_api4/groups/account.py @@ -0,0 +1,512 @@ +from typing import Union + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + Account, + AccountAvailability, + AccountBetaProgram, + AccountSettings, + BetaProgram, + ChildAccount, + Event, + Invoice, + Login, + MappedObject, + OAuthClient, + Payment, + PaymentMethod, + ServiceTransfer, + User, +) + + +class AccountGroup(Group): + """ + Collections related to your account. + """ + + def __call__(self): + """ + Retrieves information about the acting user's account, such as billing + information. 
This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + account = client.account() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-account + + :returns: Returns the acting user's account information. + :rtype: Account + """ + result = self.client.get("/account") + + if not "email" in result: + raise UnexpectedResponseError( + "Unexpected response when getting account!", json=result + ) + + return Account(self.client, result["email"], result) + + def events(self, *filters): + """ + Lists events on the current account matching the given filters. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-events + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of events on the current account matching the given filters. + :rtype: PaginatedList of Event + """ + + return self.client._get_and_filter(Event, *filters) + + def events_mark_seen(self, event): + """ + Marks event as the last event we have seen. If event is an int, it is treated + as an event_id, otherwise it should be an event object whose id will be used. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-event-seen + + :param event: The Linode event to mark as seen. + :type event: Event or int + """ + last_seen = event if isinstance(event, int) else event.id + self.client.post( + "{}/seen".format(Event.api_endpoint), + model=Event(self.client, last_seen), + ) + + def settings(self): + """ + Returns the account settings data for this acocunt. This is not a + listing endpoint. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-account-settings + + :returns: The account settings data for this account. 
+ :rtype: AccountSettings + """ + result = self.client.get("/account/settings") + + if not "managed" in result: + raise UnexpectedResponseError( + "Unexpected response when getting account settings!", + json=result, + ) + + s = AccountSettings(self.client, result["managed"], result) + return s + + def invoices(self, *filters): + """ + Returns Invoices issued to this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-invoices + + :param filters: Any number of filters to apply to this query. + + :returns: Invoices issued to this account. + :rtype: PaginatedList of Invoice + """ + return self.client._get_and_filter(Invoice, *filters) + + def payments(self, *filters): + """ + Returns a list of Payments made on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-payments + + :returns: A list of payments made on this account. + :rtype: PaginatedList of Payment + """ + return self.client._get_and_filter(Payment, *filters) + + def oauth_clients(self, *filters): + """ + Returns the OAuth Clients associated with this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-clients + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of OAuth Clients associated with this account. + :rtype: PaginatedList of OAuthClient + """ + return self.client._get_and_filter(OAuthClient, *filters) + + def oauth_client_create(self, name, redirect_uri, **kwargs): + """ + Creates a new OAuth client. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-client + + :param name: The name of this application. + :type name: str + :param redirect_uri: The location a successful log in from https://login.linode.com should be redirected to for this client. + :type redirect_uri: str + + :returns: The created OAuth Client. 
+ :rtype: OAuthClient + """ + params = { + "label": name, + "redirect_uri": redirect_uri, + } + params.update(kwargs) + + result = self.client.post("/account/oauth-clients", data=params) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating OAuth Client!", json=result + ) + + c = OAuthClient(self.client, result["id"], result) + return c + + def users(self, *filters): + """ + Returns a list of users on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-users + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of users on this account. + :rtype: PaginatedList of User + """ + return self.client._get_and_filter(User, *filters) + + def logins(self): + """ + Returns a collection of successful logins for all users on the account during the last 90 days. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-account-logins + + :returns: A list of Logins on this account. + :rtype: PaginatedList of Login + """ + + return self.client._get_and_filter(Login) + + def maintenance(self): + """ + Returns a collection of Maintenance objects for any entity a user has permissions to view. Cancelled Maintenance objects are not returned. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-maintenance + + :returns: A list of Maintenance objects on this account. + :rtype: List of Maintenance objects as MappedObjects + """ + + result = self.client.get( + "{}/maintenance".format(Account.api_endpoint), model=self + ) + + return [MappedObject(**r) for r in result["data"]] + + def payment_methods(self): + """ + Returns a list of Payment Methods for this Account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-payment-methods + + :returns: A list of Payment Methods on this account. 
+ :rtype: PaginatedList of PaymentMethod + """ + + return self.client._get_and_filter(PaymentMethod) + + def add_payment_method(self, data, is_default, type): + """ + Adds a Payment Method to your Account with the option to set it as the default method. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-payment-method + + :param data: An object representing the credit card information you have on file with + Linode to make Payments against your Account. + :type data: dict + + Example usage:: + data = { + "card_number": "4111111111111111", + "expiry_month": 11, + "expiry_year": 2020, + "cvv": "111" + } + + :param is_default: Whether this Payment Method is the default method for + automatically processing service charges. + :type is_default: bool + + :param type: The type of Payment Method. Enum: ["credit_card] + :type type: str + """ + + if type != "credit_card": + raise ValueError("Unknown Payment Method type: {}".format(type)) + + if ( + "card_number" not in data + or "expiry_month" not in data + or "expiry_year" not in data + or "cvv" not in data + or not data + ): + raise ValueError("Invalid credit card info provided") + + params = {"data": data, "type": type, "is_default": is_default} + + resp = self.client.post( + "{}/payment-methods".format(Account.api_endpoint), + model=self, + data=params, + ) + + if "error" in resp: + raise UnexpectedResponseError( + "Unexpected response when adding payment method!", + json=resp, + ) + + def notifications(self): + """ + Returns a collection of Notification objects representing important, often time-sensitive items related to your Account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-notifications + + :returns: A list of Notifications on this account. 
+ :rtype: List of Notification objects as MappedObjects + """ + + result = self.client.get( + "{}/notifications".format(Account.api_endpoint), model=self + ) + + return [MappedObject(**r) for r in result["data"]] + + def linode_managed_enable(self): + """ + Enables Linode Managed for the entire account and sends a welcome email to the accountโ€™s associated email address. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-enable-account-managed + """ + + resp = self.client.post( + "{}/settings/managed-enable".format(Account.api_endpoint), + model=self, + ) + + if "error" in resp: + raise UnexpectedResponseError( + "Unexpected response when enabling Linode Managed!", + json=resp, + ) + + def add_promo_code(self, promo_code): + """ + Adds an expiring Promo Credit to your account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-promo-credit + + :param promo_code: The Promo Code. + :type promo_code: str + """ + + params = { + "promo_code": promo_code, + } + + resp = self.client.post( + "{}/promo-codes".format(Account.api_endpoint), + model=self, + data=params, + ) + + if "error" in resp: + raise UnexpectedResponseError( + "Unexpected response when adding Promo Code!", + json=resp, + ) + + def service_transfers(self, *filters): + """ + Returns a collection of all created and accepted Service Transfers for this account, regardless of the user that created or accepted the transfer. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-service-transfers + + :returns: A list of Service Transfers on this account. + :rtype: PaginatedList of ServiceTransfer + """ + + return self.client._get_and_filter(ServiceTransfer, *filters) + + def service_transfer_create(self, entities): + """ + Creates a transfer request for the specified services. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-service-transfer + + :param entities: A collection of the services to include in this transfer request, separated by type. + :type entities: dict + + Example usage:: + entities = { + "linodes": [ + 111, + 222 + ] + } + """ + + if not entities: + raise ValueError("Entities must be provided!") + + bad_entries = [ + k for k, v in entities.items() if not isinstance(v, list) + ] + if len(bad_entries) > 0: + raise ValueError( + f"Got unexpected type for entity lists: {', '.join(bad_entries)}" + ) + + params = {"entities": entities} + + resp = self.client.post( + "{}/service-transfers".format(Account.api_endpoint), + model=self, + data=params, + ) + + if "error" in resp: + raise UnexpectedResponseError( + "Unexpected response when creating Service Transfer!", + json=resp, + ) + + def transfer(self): + """ + Returns a MappedObject containing the account's transfer pool data. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-transfer + + :returns: Information about this account's transfer pool data. + :rtype: MappedObject + """ + result = self.client.get("/account/transfer") + + if not "used" in result: + raise UnexpectedResponseError( + "Unexpected response when getting Transfer Pool!" + ) + + return MappedObject(**result) + + def user_create(self, email, username, restricted=True): + """ + Creates a new user on your account. If you create an unrestricted user, + they will immediately be able to access everything on your account. If + you create a restricted user, you must grant them access to parts of your + account that you want to allow them to manage (see :any:`User.grants` for + details). + + The new user will receive an email inviting them to set up their password. + This must be completed before they can log in. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-user + + :param email: The new user's email address. 
This is used to finish setting + up their user account. + :type email: str + :param username: The new user's unique username. They will use this username + to log in. + :type username: str + :param restricted: If True, the new user must be granted access to parts of + the account before they can do anything. If False, the + new user will immediately be able to manage the entire + account. Defaults to True. + :type restricted: bool + + :returns: The new User. + :rtype: User + """ + params = { + "email": email, + "username": username, + "restricted": restricted, + } + result = self.client.post("/account/users", data=params) + + if not all( + [c in result for c in ("email", "restricted", "username")] + ): # pylint: disable=use-a-generator + raise UnexpectedResponseError( + "Unexpected response when creating user!", json=result + ) + + u = User(self.client, result["username"], result) + return u + + def enrolled_betas(self, *filters): + """ + Returns a list of all Beta Programs an account is enrolled in. + + API doc: https://techdocs.akamai.com/linode-api/reference/get-enrolled-beta-programs + + :returns: a list of Beta Programs. + :rtype: PaginatedList of AccountBetaProgram + """ + return self.client._get_and_filter(AccountBetaProgram, *filters) + + def join_beta_program(self, beta: Union[str, BetaProgram]): + """ + Enrolls an account into a beta program. + + API doc: https://techdocs.akamai.com/linode-api/reference/post-beta-program + + :param beta: The object or id of a beta program to join. + :type beta: BetaProgram or str + + :returns: A boolean indicating whether the account joined a beta program successfully. + :rtype: bool + """ + + self.client.post( + "/account/betas", + data={"id": beta.id if isinstance(beta, BetaProgram) else beta}, + ) + + return True + + def availabilities(self, *filters): + """ + Returns a list of all available regions and the resource types which are available + to the account. 
+ + API doc: https://techdocs.akamai.com/linode-api/reference/get-account-availability + + :returns: a list of region availability information. + :rtype: PaginatedList of AccountAvailability + """ + return self.client._get_and_filter(AccountAvailability, *filters) + + def child_accounts(self, *filters): + """ + Returns a list of all child accounts under this parent account. + + NOTE: Parent/Child related features may not be generally available. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-child-accounts + + :returns: a list of all child accounts. + :rtype: PaginatedList of ChildAccount + """ + return self.client._get_and_filter(ChildAccount, *filters) diff --git a/linode_api4/groups/beta.py b/linode_api4/groups/beta.py new file mode 100644 index 000000000..a44fd492d --- /dev/null +++ b/linode_api4/groups/beta.py @@ -0,0 +1,24 @@ +from linode_api4.groups import Group +from linode_api4.objects import BetaProgram + + +class BetaProgramGroup(Group): + """ + This group encapsulates all endpoints under /betas, including viewing + available active beta programs. + """ + + def betas(self, *filters): + """ + Returns a list of available active Beta Programs. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-beta-programs + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Beta Programs that matched the query. 
+ :rtype: PaginatedList of BetaProgram + """ + return self.client._get_and_filter(BetaProgram, *filters) diff --git a/linode_api4/groups/database.py b/linode_api4/groups/database.py new file mode 100644 index 000000000..9546100a8 --- /dev/null +++ b/linode_api4/groups/database.py @@ -0,0 +1,386 @@ +from typing import Any, Dict, Union + +from linode_api4 import ( + MySQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigOptions, +) +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + Database, + DatabaseEngine, + DatabasePrivateNetwork, + DatabaseType, + MySQLDatabase, + PostgreSQLDatabase, + drop_null_keys, +) +from linode_api4.objects.base import _flatten_request_body_recursive + + +class DatabaseGroup(Group): + """ + Encapsulates Linode Managed Databases related methods of the :any:`LinodeClient`. This + should not be instantiated on its own, but should instead be used through + an instance of :any:`LinodeClient`:: + + client = LinodeClient(token) + instances = client.database.instances() # use the DatabaseGroup + + This group contains all features beneath the `/databases` group in the API v4. + """ + + def types(self, *filters): + """ + Returns a list of Linode Database-compatible Instance types. + These may be used to create Managed Databases, or simply + referenced to on their own. DatabaseTypes can be + filtered to return specific types, for example:: + + database_types = client.database.types(DatabaseType.deprecated == False) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-types + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of types that match the query. 
+ :rtype: PaginatedList of DatabaseType + """ + return self.client._get_and_filter(DatabaseType, *filters) + + def engines(self, *filters): + """ + Returns a list of Linode Managed Database Engines. + These may be used to create Managed Databases, or simply + referenced to on their own. Engines can be filtered to + return specific engines, for example:: + + mysql_engines = client.database.engines(DatabaseEngine.engine == 'mysql') + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-engines + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of types that match the query. + :rtype: PaginatedList of DatabaseEngine + """ + return self.client._get_and_filter(DatabaseEngine, *filters) + + def mysql_config_options(self): + """ + Returns a detailed list of all the configuration options for MySQL Databases. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-mysql-config + + :returns: The JSON configuration options for MySQL Databases. + """ + return self.client.get("/databases/mysql/config", model=self) + + def postgresql_config_options(self): + """ + Returns a detailed list of all the configuration options for PostgreSQL Databases. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-postgresql-config + + :returns: The JSON configuration options for PostgreSQL Databases. + """ + return self.client.get("/databases/postgresql/config", model=self) + + def instances(self, *filters): + """ + Returns a list of Managed Databases active on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-instances + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of databases that matched the query. 
+ :rtype: PaginatedList of Database + """ + return self.client._get_and_filter(Database, *filters) + + def mysql_instances(self, *filters): + """ + Returns a list of Managed MySQL Databases active on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-mysql-instances + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of MySQL databases that matched the query. + :rtype: PaginatedList of MySQLDatabase + """ + return self.client._get_and_filter(MySQLDatabase, *filters) + + def mysql_create( + self, + label, + region, + engine, + ltype, + engine_config: Union[MySQLDatabaseConfigOptions, Dict[str, Any]] = None, + private_network: Union[DatabasePrivateNetwork, Dict[str, Any]] = None, + **kwargs, + ): + """ + Creates an :any:`MySQLDatabase` on this account with + the given label, region, engine, and node type. For example:: + + client = LinodeClient(TOKEN) + + # look up Region and Types to use. In this example I'm just using + # the first ones returned. 
+ region = client.regions().first() + node_type = client.database.types()[0] + engine = client.database.engines(DatabaseEngine.engine == 'mysql')[0] + + new_database = client.database.mysql_create( + "example-database", + region, + engine.id, + node_type.id + ) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-mysql-instances + + :param label: The name for this cluster + :type label: str + :param region: The region to deploy this cluster in + :type region: str or Region + :param engine: The engine to deploy this cluster with + :type engine: str or Engine + :param ltype: The Linode Type to use for this cluster + :type ltype: str or Type + :param engine_config: The configuration options for this MySQL cluster + :type engine_config: Dict[str, Any] or MySQLDatabaseConfigOptions + :param private_network: The private network settings to use for this cluster + :type private_network: Dict[str, Any] or DatabasePrivateNetwork + """ + + params = { + "label": label, + "region": region, + "engine": engine, + "type": ltype, + "engine_config": engine_config, + "private_network": private_network, + } + params.update(kwargs) + + result = self.client.post( + "/databases/mysql/instances", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if "id" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating MySQL Database", json=result + ) + + d = MySQLDatabase(self.client, result["id"], result) + return d + + def mysql_fork(self, source, restore_time, **kwargs): + """ + Forks an :any:`MySQLDatabase` on this account with + the given restore_time. label, region, engine, and ltype are optional. 
+ For example:: + + client = LinodeClient(TOKEN) + + db_to_fork = client.database.mysql_instances()[0] + + new_fork = client.database.mysql_fork( + db_to_fork.id, + db_to_fork.updated, + label="new-fresh-label" + ) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-mysql-instances + + :param source: The id of the source database + :type source: int + :param restore_time: The timestamp for the fork + :type restore_time: datetime + :param label: The name for this cluster + :type label: str + :param region: The region to deploy this cluster in + :type region: str | Region + :param engine: The engine to deploy this cluster with + :type engine: str | Engine + :param ltype: The Linode Type to use for this cluster + :type ltype: str | Type + """ + + params = { + "fork": { + "source": source, + "restore_time": restore_time.strftime("%Y-%m-%dT%H:%M:%S"), + } + } + if "ltype" in kwargs: + params["type"] = kwargs["ltype"] + params.update(kwargs) + + result = self.client.post( + "/databases/mysql/instances", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if "id" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating MySQL Database", json=result + ) + + d = MySQLDatabase(self.client, result["id"], result) + return d + + def postgresql_instances(self, *filters): + """ + Returns a list of Managed PostgreSQL Databases active on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-postgre-sql-instances + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of PostgreSQL databases that matched the query. 
+ :rtype: PaginatedList of PostgreSQLDatabase + """ + return self.client._get_and_filter(PostgreSQLDatabase, *filters) + + def postgresql_create( + self, + label, + region, + engine, + ltype, + engine_config: Union[ + PostgreSQLDatabaseConfigOptions, Dict[str, Any] + ] = None, + private_network: Union[DatabasePrivateNetwork, Dict[str, Any]] = None, + **kwargs, + ): + """ + Creates an :any:`PostgreSQLDatabase` on this account with + the given label, region, engine, and node type. For example:: + + client = LinodeClient(TOKEN) + + # look up Region and Types to use. In this example I'm just using + # the first ones returned. + region = client.regions().first() + node_type = client.database.types()[0] + engine = client.database.engines(DatabaseEngine.engine == 'postgresql')[0] + + new_database = client.database.postgresql_create( + "example-database", + region, + engine.id, + node_type.id + ) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-postgre-sql-instances + + :param label: The name for this cluster + :type label: str + :param region: The region to deploy this cluster in + :type region: str or Region + :param engine: The engine to deploy this cluster with + :type engine: str or Engine + :param ltype: The Linode Type to use for this cluster + :type ltype: str or Type + :param engine_config: The configuration options for this PostgreSQL cluster + :type engine_config: Dict[str, Any] or PostgreSQLDatabaseConfigOptions + :param private_network: The private network settings to use for this cluster + :type private_network: Dict[str, Any] or DatabasePrivateNetwork + """ + + params = { + "label": label, + "region": region, + "engine": engine, + "type": ltype, + "engine_config": engine_config, + "private_network": private_network, + } + params.update(kwargs) + + result = self.client.post( + "/databases/postgresql/instances", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if "id" not in result: + raise 
UnexpectedResponseError( + "Unexpected response when creating PostgreSQL Database", + json=result, + ) + + d = PostgreSQLDatabase(self.client, result["id"], result) + return d + + def postgresql_fork(self, source, restore_time, **kwargs): + """ + Forks an :any:`PostgreSQLDatabase` on this account with + the given restore_time. label, region, engine, and ltype are optional. + For example:: + + client = LinodeClient(TOKEN) + + db_to_fork = client.database.postgresql_instances()[0] + + new_fork = client.database.postgresql_fork( + db_to_fork.id, + db_to_fork.updated, + label="new-fresh-label" + ) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-postgresql-instances + + :param source: The id of the source database + :type source: int + :param restore_time: The timestamp for the fork + :type restore_time: datetime + :param label: The name for this cluster + :type label: str + :param region: The region to deploy this cluster in + :type region: str | Region + :param engine: The engine to deploy this cluster with + :type engine: str | Engine + :param ltype: The Linode Type to use for this cluster + :type ltype: str | Type + """ + + params = { + "fork": { + "source": source, + "restore_time": restore_time.strftime("%Y-%m-%dT%H:%M:%S"), + } + } + if "ltype" in kwargs: + params["type"] = kwargs["ltype"] + params.update(kwargs) + + result = self.client.post( + "/databases/postgresql/instances", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if "id" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating PostgreSQL Database", + json=result, + ) + + d = PostgreSQLDatabase(self.client, result["id"], result) + return d diff --git a/linode_api4/groups/domain.py b/linode_api4/groups/domain.py new file mode 100644 index 000000000..95bd3c838 --- /dev/null +++ b/linode_api4/groups/domain.py @@ -0,0 +1,61 @@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import 
Group +from linode_api4.objects import Domain + + +class DomainGroup(Group): + def __call__(self, *filters): + """ + Retrieves all of the Domains the acting user has access to. + + This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + domains = client.domains() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-domains + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Domains the acting user can access. + :rtype: PaginatedList of Domain + """ + return self.client._get_and_filter(Domain, *filters) + + def create(self, domain, master=True, **kwargs): + """ + Registers a new Domain on the acting user's account. Make sure to point + your registrar to Linode's nameservers so that Linode's DNS manager will + correctly serve your domain. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-domain + + :param domain: The domain to register to Linode's DNS manager. + :type domain: str + :param master: Whether this is a master (defaults to true) + :type master: bool + :param tags: A list of tags to apply to the new domain. If any of the + tags included do not exist, they will be created as part of + this operation. + :type tags: list[str] + + :returns: The new Domain object. 
+ :rtype: Domain + """ + params = { + "domain": domain, + "type": "master" if master else "slave", + } + params.update(kwargs) + + result = self.client.post("/domains", data=params) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating Domain!", json=result + ) + + d = Domain(self.client, result["id"], result) + return d diff --git a/linode_api4/groups/group.py b/linode_api4/groups/group.py new file mode 100644 index 000000000..b7c0e1eeb --- /dev/null +++ b/linode_api4/groups/group.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from linode_api4.linode_client import BaseClient + + +class Group: + def __init__(self, client: BaseClient): + self.client = client diff --git a/linode_api4/groups/image.py b/linode_api4/groups/image.py new file mode 100644 index 000000000..fda56fb0a --- /dev/null +++ b/linode_api4/groups/image.py @@ -0,0 +1,173 @@ +from typing import BinaryIO, List, Optional, Tuple, Union + +import requests + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import Disk, Image +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.util import drop_null_keys + + +class ImageGroup(Group): + def __call__(self, *filters): + """ + Retrieves a list of available Images, including public and private + Images available to the acting user. You can filter this query to + retrieve only Images relevant to a specific query, for example:: + + debian_images = client.images( + Image.vendor == "debian") + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-images + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of available Images. 
+ :rtype: PaginatedList of Image + """ + return self.client._get_and_filter(Image, *filters) + + def create( + self, + disk: Union[Disk, int], + label: Optional[str] = None, + description: Optional[str] = None, + cloud_init: bool = False, + tags: Optional[List[str]] = None, + ): + """ + Creates a new Image from a disk you own. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-image + + :param disk: The Disk to imagize. + :type disk: Union[Disk, int] + :param label: The label for the resulting Image (defaults to the disk's + label. + :type label: str + :param description: The description for the new Image. + :type description: str + :param cloud_init: Whether this Image supports cloud-init. + :type cloud_init: bool + :param tags: A list of customized tags of this new Image. + :type tags: Optional[List[str]] + + :returns: The new Image. + :rtype: Image + """ + params = { + "disk_id": disk, + "label": label, + "description": description, + "tags": tags, + } + + if cloud_init: + params["cloud_init"] = cloud_init + + result = self.client.post( + "/images", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating an Image from disk {}".format( + disk + ) + ) + + return Image(self.client, result["id"], result) + + def create_upload( + self, + label: str, + region: str, + description: Optional[str] = None, + cloud_init: bool = False, + tags: Optional[List[str]] = None, + ) -> Tuple[Image, str]: + """ + Creates a new Image and returns the corresponding upload URL. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-upload-image + + :param label: The label of the Image to create. + :type label: str + :param region: The region to upload to. Once the image has been created, it can be used in any region. + :type region: str + :param description: The description for the new Image. 
+ :type description: str + :param cloud_init: Whether this Image supports cloud-init. + :type cloud_init: bool + :param tags: A list of customized tags of this Image. + :type tags: Optional[List[str]] + + :returns: A tuple containing the new image and the image upload URL. + :rtype: (Image, str) + """ + params = { + "label": label, + "region": region, + "description": description, + "tags": tags, + } + + if cloud_init: + params["cloud_init"] = cloud_init + + result = self.client.post("/images/upload", data=drop_null_keys(params)) + + if "image" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating an Image upload URL" + ) + + result_image = result["image"] + result_url = result["upload_to"] + + return Image(self.client, result_image["id"], result_image), result_url + + def upload( + self, + label: str, + region: str, + file: BinaryIO, + description: Optional[str] = None, + tags: Optional[List[str]] = None, + ) -> Image: + """ + Creates and uploads a new image. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-upload-image + + :param label: The label of the Image to create. + :type label: str + :param region: The region to upload to. Once the image has been created, it can be used in any region. + :type region: str + :param file: The BinaryIO object to upload to the image. This is generally obtained from open("myfile", "rb"). + :param description: The description for the new Image. + :type description: str + :param tags: A list of customized tags of this Image. + :type tags: Optional[List[str]] + + :returns: The resulting image. 
+ :rtype: Image + """ + + image, url = self.create_upload( + label, region, description=description, tags=tags + ) + + requests.put( + url, + headers={"Content-Type": "application/octet-stream"}, + data=file, + ) + + image._api_get() + + return image diff --git a/linode_api4/groups/image_share_group.py b/linode_api4/groups/image_share_group.py new file mode 100644 index 000000000..e932f400b --- /dev/null +++ b/linode_api4/groups/image_share_group.py @@ -0,0 +1,142 @@ +from typing import Optional + +from linode_api4.groups import Group +from linode_api4.objects import ( + ImageShareGroup, + ImageShareGroupImagesToAdd, + ImageShareGroupToken, +) +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.util import drop_null_keys + + +class ImageShareGroupAPIGroup(Group): + """ + Collections related to Private Image Sharing. + + NOTE: Private Image Sharing features are in beta and may not be generally available. + """ + + def __call__(self, *filters): + """ + Retrieves a list of Image Share Groups created by the user (producer). + You can filter this query to retrieve only Image Share Groups + relevant to a specific query, for example:: + + filtered_share_groups = client.sharegroups( + ImageShareGroup.label == "my-label") + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroups + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Image Share Groups. + :rtype: PaginatedList of ImageShareGroup + """ + return self.client._get_and_filter(ImageShareGroup, *filters) + + def sharegroups_by_image_id(self, image_id: str): + """ + Retrieves a list of Image Share Groups that share a specific Private Image. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-images-sharegroups-image + + :param image_id: The ID of the Image to query for. 
+ :type image_id: str + + :returns: A list of Image Share Groups sharing the specified Image. + :rtype: PaginatedList of ImageShareGroup + """ + return self.client._get_and_filter( + ImageShareGroup, endpoint="/images/{}/sharegroups".format(image_id) + ) + + def tokens(self, *filters): + """ + Retrieves a list of Image Share Group Tokens created by the user (consumer). + You can filter this query to retrieve only Image Share Group Tokens + relevant to a specific query, for example:: + + filtered_share_group_tokens = client.sharegroups.tokens( + ImageShareGroupToken.label == "my-label") + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-user-tokens + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Image Share Group Tokens. + :rtype: PaginatedList of ImageShareGroupToken + """ + return self.client._get_and_filter(ImageShareGroupToken, *filters) + + def create_sharegroup( + self, + label: Optional[str] = None, + description: Optional[str] = None, + images: Optional[ImageShareGroupImagesToAdd] = None, + ): + """ + Creates a new Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-sharegroups + + :param label: The label for the resulting Image Share Group. + :type label: str + :param description: The description for the new Image Share Group. + :type description: str + :param images: A list of Images to share in the new Image Share Group, formatted in JSON. + :type images: Optional[ImageShareGroupImagesToAdd] + + :returns: The new Image Share Group. 
+ :rtype: ImageShareGroup + """ + params = { + "label": label, + "description": description, + } + + if images: + params["images"] = images + + result = self.client.post( + "/images/sharegroups", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + return ImageShareGroup(self.client, result["id"], result) + + def create_token( + self, valid_for_sharegroup_uuid: str, label: Optional[str] = None + ): + """ + Creates a new Image Share Group Token and returns the token value. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-sharegroup-tokens + + :param valid_for_sharegroup_uuid: The UUID of the Image Share Group that this token will be valid for. + :type valid_for_sharegroup_uuid: Optional[str] + :param label: The label for the resulting Image Share Group Token. + :type label: str + + :returns: The new Image Share Group Token object and the one-time use token itself. + :rtype: (ImageShareGroupToken, str) + """ + params = {"valid_for_sharegroup_uuid": valid_for_sharegroup_uuid} + + if label: + params["label"] = label + + result = self.client.post( + "/images/sharegroups/tokens", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + token_value = result.pop("token", None) + token_obj = ImageShareGroupToken( + self.client, result["token_uuid"], result + ) + return token_obj, token_value diff --git a/linode_api4/groups/linode.py b/linode_api4/groups/linode.py new file mode 100644 index 000000000..2bd51fa97 --- /dev/null +++ b/linode_api4/groups/linode.py @@ -0,0 +1,489 @@ +import base64 +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +from linode_api4.common import load_and_validate_keys +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + Firewall, + Instance, + InstanceDiskEncryptionType, + Kernel, + PlacementGroup, + StackScript, + Type, +) +from linode_api4.objects.base import 
_flatten_request_body_recursive +from linode_api4.objects.filtering import Filter +from linode_api4.objects.linode import ( + Backup, + InstancePlacementGroupAssignment, + InterfaceGeneration, + NetworkInterface, + _expand_placement_group_assignment, +) +from linode_api4.objects.linode_interfaces import LinodeInterfaceOptions +from linode_api4.util import drop_null_keys + + +class LinodeGroup(Group): + """ + Encapsulates Linode-related methods of the :any:`LinodeClient`. This + should not be instantiated on its own, but should instead be used through + an instance of :any:`LinodeClient`:: + + client = LinodeClient(token) + instances = client.linode.instances() # use the LinodeGroup + + This group contains all features beneath the `/linode` group in the API v4. + """ + + def types(self, *filters): + """ + Returns a list of Linode Instance types. These may be used to create + or resize Linodes, or simply referenced on their own. Types can be + filtered to return specific types, for example:: + + standard_types = client.linode.types(Type.class == "standard") + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-types + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of types that match the query. + :rtype: PaginatedList of Type + """ + return self.client._get_and_filter(Type, *filters) + + def instances(self, *filters): + """ + Returns a list of Linode Instances on your account. You may filter + this query to return only Linodes that match specific criteria:: + + prod_linodes = client.linode.instances(Instance.group == "prod") + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-instances + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Instances that matched the query. 
+ :rtype: PaginatedList of Instance + """ + return self.client._get_and_filter(Instance, *filters) + + def stackscripts(self, *filters, **kwargs): + """ + Returns a list of :any:`StackScripts`, both public and + private. You may filter this query to return only + :any:`StackScripts` that match certain criteria. You may + also request only your own private :any:`StackScripts`:: + + my_stackscripts = client.linode.stackscripts(mine_only=True) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-stack-scripts + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + :param mine_only: If True, returns only private StackScripts + :type mine_only: bool + + :returns: A list of StackScripts matching the query. + :rtype: PaginatedList of StackScript + """ + # python2 can't handle *args and a single keyword argument, so this is a workaround + if "mine_only" in kwargs: + if kwargs["mine_only"]: + new_filter = Filter({"mine": True}) + if filters: + filters = list(filters) + filters[0] = filters[0] & new_filter + else: + filters = [new_filter] + + del kwargs["mine_only"] + + if kwargs: + raise TypeError( + "stackscripts() got unexpected keyword argument '{}'".format( + kwargs.popitem()[0] + ) + ) + + return self.client._get_and_filter(StackScript, *filters) + + def kernels(self, *filters): + """ + Returns a list of available :any:`Kernels`. Kernels are used + when creating or updating :any:`LinodeConfigs,LinodeConfig>`. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-kernels + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of available kernels that match the query. 
+ :rtype: PaginatedList of Kernel + """ + return self.client._get_and_filter(Kernel, *filters) + + # create things + def instance_create( + self, + ltype, + region, + image=None, + authorized_keys=None, + firewall: Optional[Union[Firewall, int]] = None, + backup: Optional[Union[Backup, int]] = None, + stackscript: Optional[Union[StackScript, int]] = None, + disk_encryption: Optional[ + Union[InstanceDiskEncryptionType, str] + ] = None, + placement_group: Optional[ + Union[ + InstancePlacementGroupAssignment, + PlacementGroup, + Dict[str, Any], + int, + ] + ] = None, + interfaces: Optional[ + List[ + Union[LinodeInterfaceOptions, NetworkInterface, Dict[str, Any]], + ] + ] = None, + interface_generation: Optional[Union[InterfaceGeneration, str]] = None, + network_helper: Optional[bool] = None, + maintenance_policy: Optional[str] = None, + **kwargs, + ): + """ + Creates a new Linode Instance. This function has several modes of operation: + + **Create an Instance from an Image** + + To create an Instance from an :any:`Image`, call `instance_create` with + a :any:`Type`, a :any:`Region`, and an :any:`Image`. All three of + these fields may be provided as either the ID or the appropriate object. + In this mode, a root password will be generated and returned with the + new Instance object. 
+
+        For example::
+
+           new_linode, password = client.linode.instance_create(
+               "g6-standard-2",
+               "us-east",
+               image="linode/debian9")
+
+           ltype = client.linode.types().first()
+           region = client.regions().first()
+           image = client.images().first()
+
+           another_linode, password = client.linode.instance_create(
+               ltype,
+               region,
+               image=image)
+
+           To output the password from the above example:
+               print(password)
+
+           To output the first IPv4 address of the new Linode:
+               print(new_linode.ipv4[0])
+
+           To delete the new_linode (WARNING: this immediately destroys the Linode):
+               new_linode.delete()
+
+        **Create an Instance from StackScript**
+
+        When creating an Instance from a :any:`StackScript`, an :any:`Image` that
+        the StackScript supports must be provided. You must also provide any
+        required StackScript data for the script's User Defined Fields. For
+        example, if deploying `StackScript 10079`_ (which deploys a new Instance
+        with a user created from keys on `github`_)::
+
+           stackscript = StackScript(client, 10079)
+
+           new_linode, password = client.linode.instance_create(
+               "g6-standard-2",
+               "us-east",
+               image="linode/debian9",
+               stackscript=stackscript,
+               stackscript_data={"gh_username": "example"})
+
+        In the above example, "gh_username" is the name of a User Defined Field
+        in the chosen StackScript. For more information on StackScripts, see
+        the `StackScript guide`_.
+
+        .. _`StackScript 10079`: https://www.linode.com/stackscripts/view/10079
+        .. _`github`: https://github.com
+        .. _`StackScript guide`: https://www.linode.com/docs/platform/stackscripts/
+
+        **Create an Instance from a Backup**
+
+        To create a new Instance by restoring a :any:`Backup` to it, provide a
+        :any:`Type`, a :any:`Region`, and the :any:`Backup` to restore.
You + may provide either IDs or objects for all of these fields:: + + existing_linode = Instance(client, 123) + snapshot = existing_linode.available_backups.snapshot.current + + new_linode = client.linode.instance_create( + "g6-standard-2", + "us-east", + backup=snapshot) + + **Create an Instance with explicit interfaces:** + + To create a new Instance with explicit interfaces, provide list of + LinodeInterfaceOptions objects or dicts to the "interfaces" field:: + + linode, password = client.linode.instance_create( + "g6-standard-1", + "us-mia", + image="linode/ubuntu24.04", + + # This can be configured as an account-wide default + interface_generation=InterfaceGeneration.LINODE, + + interfaces=[ + LinodeInterfaceOptions( + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True, + ipv6=True + ), + public=LinodeInterfacePublicOptions + ) + ] + ) + + **Create an empty Instance** + + If you want to create an empty Instance that you will configure manually, + simply call `instance_create` with a :any:`Type` and a :any:`Region`:: + + empty_linode = client.linode.instance_create("g6-standard-2", "us-east") + + When created this way, the Instance will not be booted and cannot boot + successfully until disks and configs are created, or it is otherwise + configured. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-linode-instance + + :param ltype: The Instance Type we are creating + :type ltype: str or Type + :param region: The Region in which we are creating the Instance + :type region: str or Region + :param image: The Image to deploy to this Instance. If this is provided + and no root_pass is given, a password will be generated + and returned along with the new Instance. + :type image: str or Image + :param stackscript: The StackScript to deploy to the new Instance. If + provided, "image" is required and must be compatible + with the chosen StackScript. 
+        :type stackscript: int or StackScript
+        :param stackscript_data: Values for the User Defined Fields defined in
+                                 the chosen StackScript. Does nothing if
+                                 StackScript is not provided.
+        :type stackscript_data: dict
+        :param backup: The Backup to restore to the new Instance. May not be
+                       provided if "image" is given.
+        :type backup: int or Backup
+        :param authorized_keys: The ssh public keys to install in the linode's
+                                /root/.ssh/authorized_keys file. Each entry may
+                                be a single key, or a path to a file containing
+                                the key.
+        :type authorized_keys: list or str
+        :param label: The display label for the new Instance
+        :type label: str
+        :param group: The display group for the new Instance
+        :type group: str
+        :param booted: Whether the new Instance should be booted. This will
+                       default to True if the Instance is deployed from an Image
+                       or Backup.
+        :type booted: bool
+        :param tags: A list of tags to apply to the new instance. If any of the
+                     tags included do not exist, they will be created as part of
+                     this operation.
+        :type tags: list[str]
+        :param private_ip: Whether the new Instance should have private networking
+                           enabled and assigned a private IPv4 address.
+        :type private_ip: bool
+        :param metadata: Metadata-related fields to use when creating the new Instance.
+                         The contents of this field can be built using the
+                         :any:`build_instance_metadata` method.
+        :type metadata: dict
+        :param firewall: The firewall to attach this Linode to.
+        :type firewall: int or Firewall
+        :param disk_encryption: The disk encryption policy for this Linode.
+                                NOTE: Disk encryption may not currently be available to all users.
+        :type disk_encryption: InstanceDiskEncryptionType or str
+        :param interfaces: An array of Network Interfaces to add to this Linode's Configuration Profile.
+                           At least one and up to three Interface objects can exist in this array.
+ :type interfaces: List[LinodeInterfaceOptions], List[NetworkInterface], or List[dict[str, Any]] + :param placement_group: A Placement Group to create this Linode under. + :type placement_group: Union[InstancePlacementGroupAssignment, PlacementGroup, Dict[str, Any], int] + :param interface_generation: The generation of network interfaces this Linode uses. + :type interface_generation: InterfaceGeneration or str + :param network_helper: Whether this instance should have Network Helper enabled. + :type network_helper: bool + :param maintenance_policy: The slug of the maintenance policy to apply during maintenance. + If not provided, the default policy (linode/migrate) will be applied. + :type maintenance_policy: str + + :returns: A new Instance object, or a tuple containing the new Instance and + the generated password. + :rtype: Instance or tuple(Instance, str) + :raises ApiError: If contacting the API fails + :raises UnexpectedResponseError: If the API response is somehow malformed. + This usually indicates that you are using + an outdated library. 
+ """ + + ret_pass = None + if image and not "root_pass" in kwargs: + ret_pass = Instance.generate_root_password() + kwargs["root_pass"] = ret_pass + + params = { + "type": ltype, + "region": region, + "image": image, + "authorized_keys": load_and_validate_keys(authorized_keys), + # These will automatically be flattened below + "firewall_id": firewall, + "backup_id": backup, + "stackscript_id": stackscript, + "maintenance_policy": maintenance_policy, + # Special cases + "disk_encryption": ( + str(disk_encryption) if disk_encryption else None + ), + "placement_group": ( + _expand_placement_group_assignment(placement_group) + if placement_group + else None + ), + "interfaces": interfaces, + "interface_generation": interface_generation, + "network_helper": network_helper, + } + + params.update(kwargs) + + result = self.client.post( + "/linode/instances", + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating linode!", json=result + ) + + l = Instance(self.client, result["id"], result) + if not ret_pass: + return l + return l, ret_pass + + @staticmethod + def build_instance_metadata(user_data=None, encode_user_data=True): + """ + Builds the contents of the ``metadata`` field to be passed into + the :any:`instance_create` method. This helper can also be used + when cloning and rebuilding Instances. + **Creating an Instance with User Data**:: + new_linode, password = client.linode.instance_create( + "g6-standard-2", + "us-east", + image="linode/ubuntu22.04", + metadata=client.linode.build_instance_metadata(user_data="myuserdata") + ) + :param user_data: User-defined data to provide to the Linode Instance through + the Metadata service. + :type user_data: str + :param encode_user_data: If true, the provided user_data field will be automatically + encoded to a valid base64 string. 
This field should only + be set to false if the user_data param is already base64-encoded. + :type encode_user_data: bool + :returns: The built ``metadata`` structure. + :rtype: dict + """ + result = {} + + if user_data is not None: + result["user_data"] = ( + base64.b64encode(user_data.encode()).decode() + if encode_user_data + else user_data + ) + + return result + + def stackscript_create( + self, label, script, images, desc=None, public=False, **kwargs + ): + """ + Creates a new :any:`StackScript` on your account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-add-stack-script + + :param label: The label for this StackScript. + :type label: str + :param script: The script to run when an :any:`Instance` is deployed with + this StackScript. Must begin with a shebang (#!). + :type script: str + :param images: A list of :any:`Images` that this StackScript + supports. Instances will not be deployed from this + StackScript unless deployed from one of these Images. + :type images: list of Image + :param desc: A description for this StackScript. + :type desc: str + :param public: Whether this StackScript is public. Defaults to False. + Once a StackScript is made public, it may not be set + back to private. 
+ :type public: bool + + :returns: The new StackScript + :rtype: StackScript + """ + + script_body = script + if not script.startswith("#!"): + # it doesn't look like a stackscript body, let's see if it's a file + script_path = Path(script) + if script_path.is_file(): + with open(script_path) as f: + script_body = f.read() + else: + raise ValueError( + "script must be the script text or a path to a file" + ) + + params = { + "label": label, + "images": images, + "is_public": public, + "script": script_body, + "description": desc if desc else "", + } + params.update(kwargs) + + result = self.client.post( + "/linode/stackscripts", + data=_flatten_request_body_recursive(params), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating StackScript!", json=result + ) + + s = StackScript(self.client, result["id"], result) + return s diff --git a/linode_api4/groups/lke.py b/linode_api4/groups/lke.py new file mode 100644 index 000000000..330c1d378 --- /dev/null +++ b/linode_api4/groups/lke.py @@ -0,0 +1,216 @@ +from typing import Any, Dict, Optional, Union + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.groups.lke_tier import LKETierGroup +from linode_api4.objects import ( + KubeVersion, + LKECluster, + LKEClusterControlPlaneOptions, + LKEType, + Type, + drop_null_keys, +) +from linode_api4.objects.base import _flatten_request_body_recursive + + +class LKEGroup(Group): + """ + Encapsulates LKE-related methods of the :any:`LinodeClient`. This + should not be instantiated on its own, but should instead be used through + an instance of :any:`LinodeClient`:: + + client = LinodeClient(token) + instances = client.lke.clusters() # use the LKEGroup + + This group contains all features beneath the `/lke` group in the API v4. 
+ """ + + def versions(self, *filters): + """ + Returns a :any:`PaginatedList` of :any:`KubeVersion` objects that can be + used when creating an LKE Cluster. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-versions + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A Paginated List of kube versions that match the query. + :rtype: PaginatedList of KubeVersion + """ + return self.client._get_and_filter(KubeVersion, *filters) + + def clusters(self, *filters): + """ + Returns a :any:`PaginagtedList` of :any:`LKECluster` objects that belong + to this account. + + https://techdocs.akamai.com/linode-api/reference/get-lke-clusters + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A Paginated List of LKE clusters that match the query. + :rtype: PaginatedList of LKECluster + """ + return self.client._get_and_filter(LKECluster, *filters) + + def cluster_create( + self, + region, + label, + kube_version, + node_pools: Optional[list] = None, + control_plane: Union[ + LKEClusterControlPlaneOptions, Dict[str, Any] + ] = None, + apl_enabled: bool = False, + tier: Optional[str] = None, + **kwargs, + ): + """ + Creates an :any:`LKECluster` on this account in the given region, with + the given label, and with node pools as described. For example:: + + client = LinodeClient(TOKEN) + + # look up Region and Types to use. In this example I'm just using + # the first ones returned. 
+           target_region = client.regions().first()
+           node_type = client.linode.types()[0]
+           node_type_2 = client.linode.types()[1]
+           kube_version = client.lke.versions()[0]
+
+           new_cluster = client.lke.cluster_create(
+               target_region,
+               "example-cluster",
+               kube_version,
+               [client.lke.node_pool(node_type, 3), client.lke.node_pool(node_type_2, 3)]
+           )
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/post-lke-cluster
+
+        :param region: The Region to create this LKE Cluster in.
+        :type region: Region or str
+        :param label: The label for the new LKE Cluster.
+        :type label: str
+        :param node_pools: The Node Pools to create.
+        :type node_pools: one or a list of dicts containing keys "type" and "count". See
+                          :any:`node_pool` for a convenient way to create correctly-
+                          formatted dicts.
+        :param kube_version: The version of Kubernetes to use
+        :type kube_version: KubeVersion or str
+        :param control_plane: The control plane configuration of this LKE cluster.
+        :type control_plane: Dict[str, Any] or LKEClusterControlPlaneOptions
+        :param apl_enabled: Whether this cluster should use APL.
+                            NOTE: This field is in beta and may only
+                            function if base_url is set to `https://api.linode.com/v4beta`.
+        :type apl_enabled: bool
+        :param tier: The tier of LKE cluster to create.
+                     NOTE: This field is in beta and may only
+                     function if base_url is set to `https://api.linode.com/v4beta`.
+        :type tier: str
+        :param kwargs: Any other arguments to pass along to the API. See the API
+                       docs for possible values.
+
+        :returns: The new LKE Cluster
+        :rtype: LKECluster
+        """
+        if node_pools is None:
+            node_pools = []
+
+        if len(node_pools) == 0 and (
+            tier is None or tier.lower() != "enterprise"
+        ):
+            raise ValueError(
+                "LKE standard clusters must have at least one node pool."
+ ) + + params = { + "label": label, + "region": region, + "k8s_version": kube_version, + "node_pools": ( + node_pools if isinstance(node_pools, list) else [node_pools] + ), + "control_plane": control_plane, + "tier": tier, + } + params.update(kwargs) + + # Prevent errors for users without access to APL + if apl_enabled: + params["apl_enabled"] = apl_enabled + + result = self.client.post( + "/lke/clusters", + data=drop_null_keys(_flatten_request_body_recursive(params)), + ) + + if "id" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating LKE cluster!", json=result + ) + + return LKECluster(self.client, result["id"], result) + + def node_pool(self, node_type: Union[Type, str], node_count: int, **kwargs): + """ + Returns a dict that is suitable for passing into the `node_pools` array + of :any:`cluster_create`. This is a convenience method, and need not be + used to create Node Pools. For proper usage, see the docs for :any:`cluster_create`. + + :param node_type: The type of node to create in this node pool. + :type node_type: Type or str + :param node_count: The number of nodes to create in this node pool. + :type node_count: int + :param kwargs: Other attributes to create this node pool with. + :type kwargs: Any + + :returns: A dict describing the desired node pool. + :rtype: dict + """ + result = { + "type": node_type, + "count": node_count, + } + + result.update(kwargs) + + return result + + def types(self, *filters): + """ + Returns a :any:`PaginatedList` of :any:`LKEType` objects that represents a valid LKE type. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-types + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A Paginated List of LKE types that match the query. 
+ :rtype: PaginatedList of LKEType + """ + + return self.client._get_and_filter( + LKEType, *filters, endpoint="/lke/types" + ) + + def tier(self, id: str) -> LKETierGroup: + """ + Returns an object representing the LKE tier API path. + + NOTE: LKE tiers may not currently be available to all users. + + :param id: The ID of the tier. + :type id: str + + :returns: An object representing the LKE tier API path. + :rtype: LKETier + """ + + return LKETierGroup(self.client, id) diff --git a/linode_api4/groups/lke_tier.py b/linode_api4/groups/lke_tier.py new file mode 100644 index 000000000..e5b8d11e5 --- /dev/null +++ b/linode_api4/groups/lke_tier.py @@ -0,0 +1,40 @@ +from linode_api4.groups import Group +from linode_api4.objects import TieredKubeVersion + + +class LKETierGroup(Group): + """ + Encapsulates methods related to a specific LKE tier. This + should not be instantiated on its own, but should instead be used through + an instance of :any:`LinodeClient`:: + + client = LinodeClient(token) + instances = client.lke.tier("standard") # use the LKETierGroup + + This group contains all features beneath the `/lke/tiers/{tier}` group in the API v4. + """ + + def __init__(self, client: "LinodeClient", tier: str): + super().__init__(client) + self.tier = tier + + def versions(self, *filters): + """ + Returns a paginated list of versions for this tier matching the given filters. + + API Documentation: Not Yet Available + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A paginated list of kube versions that match the query. 
+ :rtype: PaginatedList of TieredKubeVersion + """ + + return self.client._get_and_filter( + TieredKubeVersion, + endpoint=f"/lke/tiers/{self.tier}/versions", + parent_id=self.tier, + *filters, + ) diff --git a/linode_api4/groups/lock.py b/linode_api4/groups/lock.py new file mode 100644 index 000000000..42cc58d80 --- /dev/null +++ b/linode_api4/groups/lock.py @@ -0,0 +1,72 @@ +from typing import Union + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import Lock, LockType + +__all__ = ["LockGroup"] + + +class LockGroup(Group): + """ + Encapsulates methods for interacting with Resource Locks. + + Resource locks prevent deletion or modification of resources. + Currently, only Linode instances can be locked. + """ + + def __call__(self, *filters): + """ + Returns a list of all Resource Locks on the account. + + This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + locks = client.locks() + + API Documentation: TBD + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Resource Locks on the account. + :rtype: PaginatedList of Lock + """ + return self.client._get_and_filter(Lock, *filters) + + def create( + self, + entity_type: str, + entity_id: Union[int, str], + lock_type: Union[LockType, str], + ) -> Lock: + """ + Creates a new Resource Lock for the specified entity. + + API Documentation: TBD + + :param entity_type: The type of entity to lock (e.g., "linode"). + :type entity_type: str + :param entity_id: The ID of the entity to lock. + :type entity_id: int | str + :param lock_type: The type of lock to create. Defaults to "cannot_delete". + :type lock_type: LockType | str + + :returns: The newly created Resource Lock. 
+ :rtype: Lock + """ + params = { + "entity_type": entity_type, + "entity_id": entity_id, + "lock_type": lock_type, + } + + result = self.client.post("/locks", data=params) + + if "id" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating lock!", json=result + ) + + return Lock(self.client, result["id"], result) diff --git a/linode_api4/groups/longview.py b/linode_api4/groups/longview.py new file mode 100644 index 000000000..3f2b292e3 --- /dev/null +++ b/linode_api4/groups/longview.py @@ -0,0 +1,107 @@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + LongviewClient, + LongviewPlan, + LongviewSubscription, +) + + +class LongviewGroup(Group): + """ + Collections related to Linode Longview. + """ + + def clients(self, *filters): + """ + Requests and returns a paginated list of LongviewClients on your + account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-longview-clients + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Longview Clients matching the given filters. + :rtype: PaginatedList of LongviewClient + """ + return self.client._get_and_filter(LongviewClient, *filters) + + def client_create(self, label=None): + """ + Creates a new LongviewClient, optionally with a given label. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-longview-client + + :param label: The label for the new client. If None, a default label based + on the new client's ID will be used. + + :returns: A new LongviewClient + + :raises ApiError: If a non-200 status code is returned + :raises UnexpectedResponseError: If the returned data from the api does + not look as expected. 
+ """ + result = self.client.post("/longview/clients", data={"label": label}) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating Longview Client!", + json=result, + ) + + c = LongviewClient(self.client, result["id"], result) + return c + + def subscriptions(self, *filters): + """ + Requests and returns a paginated list of LongviewSubscriptions available + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-longview-subscriptions + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Longview Subscriptions matching the given filters. + :rtype: PaginatedList of LongviewSubscription + """ + return self.client._get_and_filter(LongviewSubscription, *filters) + + def longview_plan_update(self, longview_subscription): + """ + Update your Longview plan to that of the given subcription ID. + + :param longview_subscription: The subscription ID for a particular Longview plan. + A value of null corresponds to Longview Free. 
+ :type longview_subscription: str + + :returns: The updated Longview Plan + :rtype: LongviewPlan + """ + + if longview_subscription not in [ + "", + "longview-3", + "longview-10", + "longview-40", + "longview-100", + ]: + raise ValueError( + "Invalid longview plan subscription: {}".format( + longview_subscription + ) + ) + + params = {"longview_subscription": longview_subscription} + + result = self.client.post( + LongviewPlan.api_endpoint, model=self, data=params + ) + + plan = LongviewPlan(self.client, result["id"], result) + + plan.invalidate() + + return plan diff --git a/linode_api4/groups/maintenance.py b/linode_api4/groups/maintenance.py new file mode 100644 index 000000000..63cb424df --- /dev/null +++ b/linode_api4/groups/maintenance.py @@ -0,0 +1,23 @@ +from linode_api4.groups import Group +from linode_api4.objects import MappedObject + + +class MaintenanceGroup(Group): + """ + Collections related to Maintenance. + """ + + def maintenance_policies(self): + """ + Returns a collection of MaintenancePolicy objects representing + available maintenance policies that can be applied to Linodes + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-maintenance-policies + + :returns: A list of Maintenance Policies that can be applied to Linodes + :rtype: List of MaintenancePolicy objects as MappedObjects + """ + + result = self.client.get("/maintenance/policies", model=self) + + return [MappedObject(**r) for r in result["data"]] diff --git a/linode_api4/groups/monitor.py b/linode_api4/groups/monitor.py new file mode 100644 index 000000000..66943ade5 --- /dev/null +++ b/linode_api4/groups/monitor.py @@ -0,0 +1,286 @@ +from typing import Any, Optional + +from linode_api4 import PaginatedList +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + AlertChannel, + AlertDefinition, + MonitorDashboard, + MonitorMetricsDefinition, + MonitorService, + MonitorServiceToken, +) + 
+__all__ = [ + "MonitorGroup", +] + + +class MonitorGroup(Group): + """ + Encapsulates Monitor-related methods of the :any:`LinodeClient`. + + This group contains all features beneath the `/monitor` group in the API v4. + """ + + def dashboards( + self, *filters, service_type: Optional[str] = None + ) -> PaginatedList: + """ + Returns a list of dashboards. If `service_type` is provided, it fetches dashboards + for the specific service type. If None, it fetches all dashboards. + + dashboards = client.monitor.dashboards() + dashboard = client.load(MonitorDashboard, 1) + dashboards_by_service = client.monitor.dashboards(service_type="dbaas") + + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. + + API Documentation: + - All Dashboards: https://techdocs.akamai.com/linode-api/reference/get-dashboards-all + - Dashboards by Service: https://techdocs.akamai.com/linode-api/reference/get-dashboards + + :param service_type: The service type to get dashboards for. + :type service_type: Optional[str] + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Dashboards. + :rtype: PaginatedList of Dashboard + """ + endpoint = ( + f"/monitor/services/{service_type}/dashboards" + if service_type + else "/monitor/dashboards" + ) + + return self.client._get_and_filter( + MonitorDashboard, + *filters, + endpoint=endpoint, + ) + + def services( + self, + *filters, + ) -> PaginatedList: + """ + Lists services supported by ACLP. + supported_services = client.monitor.services() + service_details = client.monitor.load(MonitorService, "dbaas") + + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. 
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services-for-service-type
+
+        :param filters: Any number of filters to apply to this query.
+                        See :doc:`Filtering Collections`
+                        for more details on filtering.
+
+        :returns: Lists monitor services
+        :rtype: PaginatedList of the Services
+        """
+        endpoint = "/monitor/services"
+
+        return self.client._get_and_filter(
+            MonitorService,
+            *filters,
+            endpoint=endpoint,
+        )
+
+    def metric_definitions(
+        self, service_type: str, *filters
+    ) -> list[MonitorMetricsDefinition]:
+        """
+        Returns metrics for a specific service type.
+
+        metrics = client.monitor.metric_definitions(service_type="dbaas")
+        .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-information
+
+        :param service_type: The service type to get metrics for.
+        :type service_type: str
+        :param filters: Any number of filters to apply to this query.
+                        See :doc:`Filtering Collections`
+                        for more details on filtering.
+
+        :returns: Returns a List of metrics for a service
+        :rtype: PaginatedList of metrics
+        """
+        return self.client._get_and_filter(
+            MonitorMetricsDefinition,
+            *filters,
+            endpoint=f"/monitor/services/{service_type}/metric-definitions",
+        )
+
+    def create_token(
+        self, service_type: str, entity_ids: list[Any]
+    ) -> MonitorServiceToken:
+        """
+        Returns a JWE Token for a specific service type.
+        token = client.monitor.create_token(service_type="dbaas", entity_ids=[1234])
+
+        .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/post-get-token
+
+        :param service_type: The service type to create token for.
+ :type service_type: str + :param entity_ids: The list of entity IDs for which the token is valid. + :type entity_ids: any + + :returns: Returns a token for a service + :rtype: str + """ + + params = {"entity_ids": entity_ids} + + result = self.client.post( + f"/monitor/services/{service_type}/token", data=params + ) + + if "token" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating token!", json=result + ) + return MonitorServiceToken(token=result["token"]) + + def alert_definitions( + self, + *filters, + service_type: Optional[str] = None, + ) -> PaginatedList: + """ + Retrieve alert definitions. + + Returns a paginated collection of :class:`AlertDefinition` objects. If you + need to obtain a single :class:`AlertDefinition`, use :meth:`LinodeClient.load` + and supply the `service_type` as the parent identifier, for example: + + alerts = client.monitor.alert_definitions() + alerts_by_service = client.monitor.alert_definitions(service_type="dbaas") + .. note:: This endpoint is in beta and requires using the v4beta base URL. + + API Documentation: + https://techdocs.akamai.com/linode-api/reference/get-alert-definitions + https://techdocs.akamai.com/linode-api/reference/get-alert-definitions-for-service-type + + :param service_type: Optional service type to scope the query (e.g. ``"dbaas"``). + :type service_type: Optional[str] + :param filters: Optional filtering expressions to apply to the returned + collection. See :doc:`Filtering Collections`. + + :returns: A paginated list of :class:`AlertDefinition` objects. + :rtype: PaginatedList[AlertDefinition] + """ + + endpoint = "/monitor/alert-definitions" + if service_type: + endpoint = f"/monitor/services/{service_type}/alert-definitions" + + # Requesting a list + return self.client._get_and_filter( + AlertDefinition, *filters, endpoint=endpoint + ) + + def alert_channels(self, *filters) -> PaginatedList: + """ + List alert channels for the authenticated account. 
+ + Returns a paginated collection of :class:`AlertChannel` objects which + describe destinations for alert notifications (for example: email + lists, webhooks, PagerDuty, Slack, etc.). By default this method + returns all channels visible to the authenticated account; you can + supply optional filter expressions to restrict the results. + + Examples: + channels = client.monitor.alert_channels() + + .. note:: This endpoint is in beta and requires using the v4beta base URL. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-alert-channels + + :param filters: Optional filter expressions to apply to the collection. + See :doc:`Filtering Collections` for details. + :returns: A paginated list of :class:`AlertChannel` objects. + :rtype: PaginatedList[AlertChannel] + """ + return self.client._get_and_filter(AlertChannel, *filters) + + def create_alert_definition( + self, + service_type: str, + label: str, + severity: int, + channel_ids: list[int], + rule_criteria: dict, + trigger_conditions: dict, + entity_ids: Optional[list[str]] = None, + description: Optional[str] = None, + ) -> AlertDefinition: + """ + Create a new alert definition for a given service type. + + The alert definition configures when alerts are fired and which channels + are notified. + + .. note:: This endpoint is in beta and requires using the v4beta base URL. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-alert-definition-for-service-type + + :param service_type: Service type for which to create the alert definition + (e.g. ``"dbaas"``). + :type service_type: str + :param label: Human-readable label for the alert definition. + :type label: str + :param severity: Severity level for the alert (numeric severity used by API). + :type severity: int + :param channel_ids: List of alert channel IDs to notify when the alert fires. + :type channel_ids: list[int] + :param rule_criteria: Rule criteria that determine when the alert + should be evaluated. 
Structure depends on the service + metric definitions. + :type rule_criteria: dict + :param trigger_conditions: Trigger conditions that define when + the alert should transition state. + :type trigger_conditions: dict + :param entity_ids: (Optional) Restrict the alert to a subset of entity IDs. + :type entity_ids: Optional[list[str]] + :param description: (Optional) Longer description for the alert definition. + :type description: Optional[str] + + :returns: The newly created :class:`AlertDefinition`. + :rtype: AlertDefinition + + .. note:: + For updating an alert definition, use the ``save()`` method on the :class:`AlertDefinition` object. + For deleting an alert definition, use the ``delete()`` method directly on the :class:`AlertDefinition` object. + """ + params = { + "label": label, + "severity": severity, + "channel_ids": channel_ids, + "rule_criteria": rule_criteria, + "trigger_conditions": trigger_conditions, + } + if description is not None: + params["description"] = description + if entity_ids is not None: + params["entity_ids"] = entity_ids + + # API will validate service_type and return an error if missing + result = self.client.post( + f"/monitor/services/{service_type}/alert-definitions", data=params + ) + + if "id" not in result: + raise UnexpectedResponseError( + "Unexpected response when creating alert definition!", + json=result, + ) + + return AlertDefinition(self.client, result["id"], service_type, result) diff --git a/linode_api4/groups/monitor_api.py b/linode_api4/groups/monitor_api.py new file mode 100644 index 000000000..48e2b2c30 --- /dev/null +++ b/linode_api4/groups/monitor_api.py @@ -0,0 +1,59 @@ +__all__ = [ + "MetricsGroup", +] + +from typing import Any, Dict, List, Optional, Union + +from linode_api4 import drop_null_keys +from linode_api4.groups import Group +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.objects.monitor_api import EntityMetricOptions, EntityMetrics + + +class 
MetricsGroup(Group): + """ + Encapsulates Monitor-related methods of the :any:`MonitorClient`. + + This group contains all features related to metrics in the API monitor-api. + """ + + def fetch_metrics( + self, + service_type: str, + entity_ids: list, + metrics: List[Union[EntityMetricOptions, Dict[str, Any]]], + **kwargs, + ) -> Optional[EntityMetrics]: + """ + Returns metrics information for the individual entities within a specific service type. + + API documentation: https://techdocs.akamai.com/linode-api/reference/post-read-metric + + :param service_type: The service being monitored. + Currently, only the Managed Databases (dbaas) service type is supported. + :type service_type: str + + :param entity_ids: The id for each individual entity from a service_type. + :type entity_ids: list + + :param metrics: A list of metric objects, each specifying a metric name and its corresponding aggregation function. + :type metrics: list of EntityMetricOptions or Dict[str, Any] + + :param kwargs: Any other arguments accepted by the api. Please refer to the API documentation for full info. + + :returns: Service metrics requested. 
+ :rtype: EntityMetrics or None + """ + params = { + "entity_ids": entity_ids, + "metrics": metrics, + } + + params.update(kwargs) + + result = self.client.post( + f"/monitor/services/{service_type}/metrics", + data=drop_null_keys(_flatten_request_body_recursive(params)), + ) + + return EntityMetrics.from_json(result) diff --git a/linode_api4/groups/networking.py b/linode_api4/groups/networking.py new file mode 100644 index 000000000..b16d12d9a --- /dev/null +++ b/linode_api4/groups/networking.py @@ -0,0 +1,512 @@ +from typing import Any, Dict, Optional, Union + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + VLAN, + Base, + Firewall, + FirewallCreateDevicesOptions, + FirewallSettings, + FirewallTemplate, + Instance, + IPAddress, + IPv6Pool, + IPv6Range, + NetworkTransferPrice, + Region, +) +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.util import drop_null_keys + + +class NetworkingGroup(Group): + """ + Collections related to Linode Networking. + """ + + def firewalls(self, *filters): + """ + Retrieves the Firewalls your user has access to. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-firewalls + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Firewalls the acting user can access. + :rtype: PaginatedList of Firewall + """ + return self.client._get_and_filter(Firewall, *filters) + + def firewall_create( + self, + label: str, + rules: Dict[str, Any], + devices: Optional[ + Union[FirewallCreateDevicesOptions, Dict[str, Any]] + ] = None, + **kwargs, + ): + """ + Creates a new Firewall, either in the given Region or + attached to the given Instance. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-firewalls + + :param label: The label for the new Firewall. 
+ :type label: str + :param rules: The rules to apply to the new Firewall. For more information on Firewall rules, see our `Firewalls Documentation`_. + :type rules: dict + :param devices: Represents devices to create created alongside a Linode Firewall. + :type devices: Optional[Union[FirewallCreateDevicesOptions, Dict[str, Any]]] + + :returns: The new Firewall. + :rtype: Firewall + + Example usage:: + + rules = { + 'outbound': [ + { + 'action': 'ACCEPT', + 'addresses': { + 'ipv4': [ + '0.0.0.0/0' + ], + 'ipv6': [ + "ff00::/8" + ] + }, + 'description': 'Allow HTTP out.', + 'label': 'allow-http-out', + 'ports': '80', + 'protocol': 'TCP' + } + ], + 'outbound_policy': 'DROP', + 'inbound': [], + 'inbound_policy': 'DROP' + } + + firewall = client.networking.firewall_create('my-firewall', rules) + + .. _Firewalls Documentation: https://techdocs.akamai.com/linode-api/reference/post-firewalls + """ + + params = { + "label": label, + "rules": rules, + "devices": devices, + } + params.update(kwargs) + + result = self.client.post( + "/networking/firewalls", + data=drop_null_keys(_flatten_request_body_recursive(params)), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating Firewall!", json=result + ) + + f = Firewall(self.client, result["id"], result) + return f + + def firewall_templates(self, *filters): + """ + Returns a list of Firewall Templates available to the current user. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-firewall-templates + + NOTE: This feature may not currently be available to all users. + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Firewall Templates available to the current user. 
+ :rtype: PaginatedList of FirewallTemplate + """ + return self.client._get_and_filter(FirewallTemplate, *filters) + + def firewall_settings(self) -> FirewallSettings: + """ + Returns an object representing the Linode Firewall settings for the current user. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-firewall-settings + + NOTE: This feature may not currently be available to all users. + :returns: An object representing the Linode Firewall settings for the current user. + :rtype: FirewallSettings + """ + result = self.client.get("/networking/firewalls/settings") + + if "default_firewall_ids" not in result: + raise UnexpectedResponseError( + "Unexpected response when getting firewall settings!", + json=result, + ) + + return FirewallSettings(self.client, None, result) + + def ips(self, *filters): + """ + Returns a list of IP addresses on this account, excluding private addresses. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ips + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of IP addresses on this account. + :rtype: PaginatedList of IPAddress + """ + return self.client._get_and_filter(IPAddress, *filters) + + def ipv6_ranges(self, *filters): + """ + Returns a list of IPv6 ranges on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ipv6-ranges + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of IPv6 ranges on this account. 
+        :rtype: PaginatedList of IPv6Range
+        """
+        return self.client._get_and_filter(IPv6Range, *filters)
+
+    def ipv6_range_allocate(
+        self,
+        prefix_length: int,
+        route_target: Optional[str] = None,
+        linode: Optional[Union[Instance, int]] = None,
+        **kwargs,
+    ) -> IPv6Range:
+        """
+        Creates an IPv6 Range and assigns it based on the provided Linode or route target IPv6 SLAAC address.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/post-ipv6-range
+
+        Create an IPv6 range assigned to a Linode by ID::
+
+            range = client.networking.ipv6_range_allocate(64, linode_id=123)
+
+
+        Create an IPv6 range assigned to a Linode by SLAAC::
+
+            range = client.networking.ipv6_range_allocate(
+                64,
+                route_target=instance.ipv6.split("/")[0]
+            )
+
+        :param prefix_length: The prefix length of the IPv6 range.
+        :type prefix_length: int
+        :param route_target: The IPv6 SLAAC address to assign this range to.
+                             Required if linode is not specified.
+        :type route_target: str
+        :param linode: The ID of the Linode to assign this range to.
+                       The SLAAC address for the provided Linode is used as the range's route_target.
+                       Required if route_target is not specified.
+        :type linode: Instance or int
+
+        :returns: The new IPv6Range.
+        :rtype: IPv6Range
+        """
+
+        params = {
+            "prefix_length": prefix_length,
+            "route_target": route_target,
+            "linode_id": linode,
+        }
+
+        params.update(**kwargs)
+
+        result = self.client.post(
+            "/networking/ipv6/ranges",
+            data=drop_null_keys(_flatten_request_body_recursive(params)),
+        )
+
+        if "range" not in result:
+            raise UnexpectedResponseError(
+                "Unexpected response when allocating IPv6 range!", json=result
+            )
+
+        result = IPv6Range(self.client, result["range"], result)
+        return result
+
+    def ipv6_pools(self, *filters):
+        """
+        Returns a list of IPv6 pools on this account.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ipv6-pools
+
+        :param filters: Any number of filters to apply to this query.
+ See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of IPv6 pools on this account. + :rtype: PaginatedList of IPv6Pool + """ + + return self.client._get_and_filter(IPv6Pool, *filters) + + def vlans(self, *filters): + """ + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. + + Returns a list of VLANs on your account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-vlans + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A List of VLANs on your account. + :rtype: PaginatedList of VLAN + """ + return self.client._get_and_filter(VLAN, *filters) + + def ips_assign(self, region, *assignments): + """ + Redistributes :any:`IP Addressees` within a single region. + This function takes a :any:`Region` and a list of assignments to make, + then requests that the assignments take place. If any :any:`Instance` + ends up without a public IP, or with more than one private IP, all of + the assignments will fail. + + .. note:: + This function *does not* update the local Linode Instance objects + when called. In order to see the new addresses on the local + instance objects, be sure to invalidate them with ``invalidate()`` + after this completes. + + Example usage:: + + linode1 = Instance(client, 123) + linode2 = Instance(client, 456) + + # swap IPs between linodes 1 and 2 + client.networking.assign_ips(linode1.region, + linode1.ips.ipv4.public[0].to(linode2), + linode2.ips.ipv4.public[0].to(linode1)) + + # make sure linode1 and linode2 have updated ipv4 and ips values + linode1.invalidate() + linode2.invalidate() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-assign-ipv4s + + :param region: The Region in which the assignments should take place. + All Instances and IPAddresses involved in the assignment + must be within this region. 
+        :type region: str or Region
+        :param assignments: Any number of assignments to make. See
+                            :any:`IPAddress.to` for details on how to construct
+                            assignments.
+        :type assignments: dict
+
+        DEPRECATED: Use ip_addresses_assign() instead
+        """
+        for a in assignments:
+            if "address" not in a or "linode_id" not in a:
+                raise ValueError("Invalid assignment: {}".format(a))
+        if isinstance(region, Region):
+            region = region.id
+
+        self.client.post(
+            "/networking/ipv4/assign",
+            data={
+                "region": region,
+                "assignments": assignments,
+            },
+        )
+
+    def ip_allocate(self, linode, public=True):
+        """
+        Allocates an IP to an Instance you own. Additional IPs must be requested
+        by opening a support ticket first.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/post-allocate-ip
+
+        :param linode: The Instance to allocate the new IP for.
+        :type linode: Instance or int
+        :param public: If True, allocate a public IP address. Defaults to True.
+        :type public: bool
+
+        :returns: The new IPAddress.
+        :rtype: IPAddress
+        """
+        result = self.client.post(
+            "/networking/ips/",
+            data={
+                "linode_id": linode.id if isinstance(linode, Base) else linode,
+                "type": "ipv4",
+                "public": public,
+            },
+        )
+
+        if "address" not in result:
+            raise UnexpectedResponseError(
+                "Unexpected response when adding IPv4 address!", json=result
+            )
+
+        ip = IPAddress(self.client, result["address"], result)
+        return ip
+
+    def ips_share(self, linode, *ips):
+        """
+        Shares the given list of :any:`IPAddresses` with the provided
+        :any:`Instance`. This will enable the provided Instance to bring up the
+        shared IP Addresses even though it does not own them.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/post-share-ipv4s
+
+        :param linode: The Instance to share the IPAddresses with. This Instance
+                       will be able to bring up the given addresses.
+        :type linode: int or Instance
+        :param ips: Any number of IPAddresses to share to the Instance.
+ :type ips: str or IPAddress + + DEPRECATED: Use ip_addresses_share() instead + """ + if not isinstance(linode, Instance): + # make this an object + linode = Instance(self.client, linode) + + params = [] + for ip in ips: + if isinstance(ip, str): + params.append(ip) + elif isinstance(ip, IPAddress): + params.append(ip.address) + else: + params.append(str(ip)) # and hope that works + + params = {"ips": params} + + self.client.post( + "{}/networking/ipv4/share".format(Instance.api_endpoint), + model=linode, + data=params, + ) + + linode.invalidate() # clear the Instance's shared IPs + + def ip_addresses_share(self, ips, linode): + """ + Configure shared IPs. IP sharing allows IP address reassignment + (also referred to as IP failover) from one Linode to another if the + primary Linode becomes unresponsive. This means that requests to the primary Linodeโ€™s + IP address can be automatically rerouted to secondary Linodes at the configured shared IP addresses. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-share-ips + + :param linode: The id of the Instance or the Instance to share the IPAddresses with. + This Instance will be able to bring up the given addresses. + :type: linode: int or Instance + :param ips: Any number of IPAddresses to share to the Instance. Enter an empty array to + remove all shared IP addresses. + :type ips: str or IPAddress + """ + + shared_ips = [] + for ip in ips: + if isinstance(ip, str): + shared_ips.append(ip) + elif isinstance(ip, IPAddress): + shared_ips.append(ip.address) + else: + shared_ips.append(str(ip)) # and hope that works + + params = { + "ips": shared_ips, + "linode_id": ( + linode if not isinstance(linode, Instance) else linode.id + ), + } + + self.client.post("/networking/ips/share", model=self, data=params) + + def ip_addresses_assign(self, assignments, region): + """ + Assign multiple IPv4 addresses and/or IPv6 ranges to multiple Linodes in one Region. 
+ This allows swapping, shuffling, or otherwise reorganizing IPs to your Linodes. + + The following restrictions apply: + - All Linodes involved must have at least one public IPv4 address after assignment. + - Linodes may have no more than one assigned private IPv4 address. + - Linodes may have no more than one assigned IPv6 range. + + + :param region: The Region in which the assignments should take place. + All Instances and IPAddresses involved in the assignment + must be within this region. + :type region: str or Region + :param assignments: Any number of assignments to make. See + :any:`IPAddress.to` for details on how to construct + assignments. + :type assignments: dct + """ + + for a in assignments["assignments"]: + if not "address" in a or not "linode_id" in a: + raise ValueError("Invalid assignment: {}".format(a)) + + if isinstance(region, Region): + region = region.id + + params = {"assignments": assignments, "region": region} + + self.client.post("/networking/ips/assign", model=self, data=params) + + def transfer_prices(self, *filters): + """ + Returns a :any:`PaginatedList` of :any:`NetworkTransferPrice` objects that represents a valid network transfer price. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-network-transfer-prices + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A Paginated List of network transfer price that match the query. + :rtype: PaginatedList of NetworkTransferPrice + """ + + return self.client._get_and_filter( + NetworkTransferPrice, *filters, endpoint="/network-transfer/prices" + ) + + def delete_vlan(self, vlan, region): + """ + This operation deletes a VLAN. + You can't delete a VLAN if it's still attached to a Linode. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-vlan + + :param vlan: The label of the VLAN to be deleted. 
+ :type vlan: str or VLAN + :param region: The VLAN's region. + :type region: str or Region + """ + if isinstance(region, Region): + region = region.id + + if isinstance(vlan, VLAN): + vlan = vlan.label + resp = self.client.delete( + "/networking/vlans/{}/{}".format(region, vlan), + model=self, + ) + + if "error" in resp: + return False + + return True diff --git a/linode_api4/groups/nodebalancer.py b/linode_api4/groups/nodebalancer.py new file mode 100644 index 000000000..57830c8c4 --- /dev/null +++ b/linode_api4/groups/nodebalancer.py @@ -0,0 +1,70 @@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import Base, NodeBalancer, NodeBalancerType + + +class NodeBalancerGroup(Group): + def __call__(self, *filters): + """ + Retrieves all of the NodeBalancers the acting user has access to. + + This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + nodebalancers = client.nodebalancers() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancers + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of NodeBalancers the acting user can access. + :rtype: PaginatedList of NodeBalancers + """ + return self.client._get_and_filter(NodeBalancer, *filters) + + def create(self, region, **kwargs): + """ + Creates a new NodeBalancer in the given Region. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-node-balancer + + :param region: The Region in which to create the NodeBalancer. 
+        :type region: Region or str
+
+        :returns: The new NodeBalancer
+        :rtype: NodeBalancer
+        """
+        params = {
+            "region": region.id if isinstance(region, Base) else region,
+        }
+        params.update(kwargs)
+
+        result = self.client.post("/nodebalancers", data=params)
+
+        if "id" not in result:
+            raise UnexpectedResponseError(
+                "Unexpected response when creating NodeBalancer!", json=result
+            )
+
+        n = NodeBalancer(self.client, result["id"], result)
+        return n
+
+    def types(self, *filters):
+        """
+        Returns a :any:`PaginatedList` of :any:`NodeBalancerType` objects that represents a valid NodeBalancer type.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancer-types
+
+        :param filters: Any number of filters to apply to this query.
+                        See :doc:`Filtering Collections`
+                        for more details on filtering.
+
+        :returns: A Paginated List of NodeBalancer types that match the query.
+        :rtype: PaginatedList of NodeBalancerType
+        """
+
+        return self.client._get_and_filter(
+            NodeBalancerType, *filters, endpoint="/nodebalancers/types"
+        )
diff --git a/linode_api4/groups/object_storage.py b/linode_api4/groups/object_storage.py
new file mode 100644
index 000000000..5ffab3ffc
--- /dev/null
+++ b/linode_api4/groups/object_storage.py
@@ -0,0 +1,535 @@
+import re
+import warnings
+from typing import List, Optional, Union
+from urllib import parse
+
+from deprecated import deprecated
+
+from linode_api4 import (
+    ObjectStorageEndpoint,
+    ObjectStorageEndpointType,
+    ObjectStorageType,
+    PaginatedList,
+)
+from linode_api4.errors import UnexpectedResponseError
+from linode_api4.groups import Group
+from linode_api4.objects import (
+    Base,
+    MappedObject,
+    ObjectStorageACL,
+    ObjectStorageBucket,
+    ObjectStorageCluster,
+    ObjectStorageKeyPermission,
+    ObjectStorageKeys,
+    ObjectStorageQuota,
+)
+from linode_api4.util import drop_null_keys
+
+
+class ObjectStorageGroup(Group):
+    """
+    This group encapsulates all endpoints under /object-storage, including viewing
+
available clusters, buckets, and managing keys and TLS/SSL certs, etc. + """ + + @deprecated( + reason="deprecated to use regions list API for listing available OJB clusters" + ) + def clusters(self, *filters): + """ + This endpoint will be deprecated to use the regions list API to list available OBJ clusters, + and a new access key API will directly expose the S3 endpoint hostname. + + Returns a list of available Object Storage Clusters. You may filter + this query to return only Clusters that are available in a specific region:: + + us_east_clusters = client.object_storage.clusters(ObjectStorageCluster.region == "us-east") + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-clusters + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Object Storage Clusters that matched the query. + :rtype: PaginatedList of ObjectStorageCluster + """ + return self.client._get_and_filter(ObjectStorageCluster, *filters) + + def keys(self, *filters): + """ + Returns a list of Object Storage Keys active on this account. These keys + allow third-party applications to interact directly with Linode Object Storage. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-keys + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Object Storage Keys that matched the query. + :rtype: PaginatedList of ObjectStorageKeys + """ + return self.client._get_and_filter(ObjectStorageKeys, *filters) + + def types(self, *filters): + """ + Returns a paginated list of Object Storage Types. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-types + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. 
+ + :returns: A Paginated List of Object Storage types that match the query. + :rtype: PaginatedList of ObjectStorageType + """ + + return self.client._get_and_filter( + ObjectStorageType, *filters, endpoint="/object-storage/types" + ) + + def keys_create( + self, + label: str, + bucket_access: Optional[Union[dict, List[dict]]] = None, + regions: Optional[List[str]] = None, + ): + """ + Creates a new Object Storage keypair that may be used to interact directly + with Linode Object Storage in third-party applications. This response is + the only time that "secret_key" will be populated - be sure to capture its + value or it will be lost forever. + + If given, `bucket_access` will cause the new keys to be restricted to only + the specified level of access for the specified buckets. For example, to + create a keypair that can only access the "example" bucket in all clusters + (and assuming you own that bucket in every cluster), you might do this:: + + client = LinodeClient(TOKEN) + + # look up clusters + all_clusters = client.object_storage.clusters() + + new_keys = client.object_storage.keys_create( + "restricted-keys", + bucket_access=[ + client.object_storage.bucket_access(cluster, "example", "read_write") + for cluster in all_clusters + ], + ) + + To create a keypair that can only read from the bucket "example2" in the + "us-east-1" cluster (an assuming you own that bucket in that cluster), + you might do this:: + + client = LinodeClient(TOKEN) + new_keys_2 = client.object_storage.keys_create( + "restricted-keys-2", + bucket_access=client.object_storage.bucket_access("us-east-1", "example2", "read_only"), + ) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-object-storage-keys + + :param label: The label for this keypair, for identification only. + :type label: str + :param bucket_access: One or a list of dicts with keys "cluster," "region", + "permissions", and "bucket_name". 
"cluster" key is
+                              deprecated because multiple clusters can be placed
+                              in the same region. Please consider switching to
+                              regions. If given, the resulting Object Storage keys
+                              will only have the requested level of access to the
+                              requested buckets, if they exist and are owned by
+                              you. See the provided :any:`bucket_access` function
+                              for a convenient way to create these dicts.
+        :type bucket_access: Optional[Union[dict, List[dict]]]
+
+        :returns: The new keypair, with the secret key populated.
+        :rtype: ObjectStorageKeys
+        """
+        params = {"label": label}
+
+        if bucket_access is not None:
+            if not isinstance(bucket_access, list):
+                bucket_access = [bucket_access]
+
+            ba = []
+            for access_rule in bucket_access:
+                access_rule_json = {
+                    "permissions": access_rule.get("permissions"),
+                    "bucket_name": access_rule.get("bucket_name"),
+                }
+
+                if "region" in access_rule:
+                    access_rule_json["region"] = access_rule.get("region")
+                elif "cluster" in access_rule:
+                    warnings.warn(
+                        "'cluster' is a deprecated attribute, "
+                        "please consider using 'region' instead.",
+                        DeprecationWarning,
+                    )
+                    # access_rule is a dict; resolve a Base-derived cluster
+                    # object to its ID, otherwise pass the raw value through.
+                    access_rule_json["cluster"] = (
+                        access_rule["cluster"].id
+                        if issubclass(type(access_rule["cluster"]), Base)
+                        else access_rule["cluster"]
+                    )
+
+                ba.append(access_rule_json)
+
+            params["bucket_access"] = ba
+
+        if regions is not None:
+            params["regions"] = regions
+
+        result = self.client.post("/object-storage/keys", data=params)
+
+        if "id" not in result:
+            raise UnexpectedResponseError(
+                "Unexpected response when creating Object Storage Keys!",
+                json=result,
+            )
+
+        ret = ObjectStorageKeys(self.client, result["id"], result)
+        return ret
+
+    @classmethod
+    def bucket_access(
+        cls,
+        cluster_or_region: str,
+        bucket_name: str,
+        permissions: Union[str, ObjectStorageKeyPermission],
+    ):
+        """
+        Returns a dict formatted to be included in the `bucket_access` argument
+        of :any:`keys_create`. See the docs for that method for an example of
+        usage.
+ + :param cluster_or_region: The region or Object Storage cluster to grant access in. + :type cluster_or_region: str + :param bucket_name: The name of the bucket to grant access to. + :type bucket_name: str + :param permissions: The permissions to grant. Should be one of "read_only" + or "read_write". + :type permissions: Union[str, ObjectStorageKeyPermission] + :param use_region: Whether to use region mode. + :type use_region: bool + + :returns: A dict formatted correctly for specifying bucket access for + new keys. + :rtype: dict + """ + + result = { + "bucket_name": bucket_name, + "permissions": permissions, + } + + if cls.is_cluster(cluster_or_region): + warnings.warn( + "Cluster ID for Object Storage APIs has been deprecated. " + "Please consider switch to a region ID (e.g., from `us-mia-1` to `us-mia`)", + DeprecationWarning, + ) + result["cluster"] = cluster_or_region + else: + result["region"] = cluster_or_region + + return result + + def buckets_in_region(self, region: str, *filters): + """ + Returns a list of Buckets in the region belonging to this Account. + + This endpoint is available for convenience. + It is recommended that instead you use the more fully-featured S3 API directly. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-bucketin-cluster + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :param region: The ID of an object storage region (e.g. `us-mia-1`). + :type region: str + + :returns: A list of Object Storage Buckets that in the requested cluster. + :rtype: PaginatedList of ObjectStorageBucket + """ + + return self.client._get_and_filter( + ObjectStorageBucket, + *filters, + endpoint=f"/object-storage/buckets/{region}", + ) + + def cancel(self): + """ + Cancels Object Storage service. This may be a destructive operation. 
Once + cancelled, you will no longer receive the transfer for or be billed for + Object Storage, and all keys will be invalidated. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-cancel-object-storage + """ + self.client.post("/object-storage/cancel", data={}) + return True + + def transfer(self): + """ + The amount of outbound data transfer used by your accountโ€™s Object Storage buckets, + in bytes, for the current monthโ€™s billing cycle. Object Storage adds 1 terabyte + of outbound data transfer to your data transfer pool. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-transfer + + :returns: The amount of outbound data transfer used by your accountโ€™s Object + Storage buckets, in bytes, for the current monthโ€™s billing cycle. + :rtype: MappedObject + """ + result = self.client.get("/object-storage/transfer") + + if not "used" in result: + raise UnexpectedResponseError( + "Unexpected response when getting Transfer Pool!", + json=result, + ) + + return MappedObject(**result) + + def endpoints(self, *filters) -> PaginatedList: + """ + Returns a paginated list of all Object Storage endpoints available in your account. + + This is intended to be called from the :any:`LinodeClient` + class, like this:: + + endpoints = client.object_storage.endpoints() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-endpoints + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Object Storage Endpoints that matched the query. + :rtype: PaginatedList of ObjectStorageEndpoint + """ + return self.client._get_and_filter( + ObjectStorageEndpoint, + *filters, + endpoint="/object-storage/endpoints", + ) + + def buckets(self, *filters): + """ + Returns a paginated list of all Object Storage Buckets that you own. + This endpoint is available for convenience. 
+ It is recommended that instead you use the more fully-featured S3 API directly. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-buckets + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Object Storage Buckets that matched the query. + :rtype: PaginatedList of ObjectStorageBucket + """ + return self.client._get_and_filter(ObjectStorageBucket, *filters) + + @staticmethod + def is_cluster(cluster_or_region: str): + return bool(re.match(r"^[a-z]{2}-[a-z]+-[0-9]+$", cluster_or_region)) + + def bucket_create( + self, + cluster_or_region: Union[str, ObjectStorageCluster], + label: str, + acl: ObjectStorageACL = ObjectStorageACL.PRIVATE, + cors_enabled=False, + s3_endpoint: Optional[str] = None, + endpoint_type: Optional[ObjectStorageEndpointType] = None, + ): + """ + Creates an Object Storage Bucket in the specified cluster. Accounts with + negative balances cannot access this command. If the bucket already exists + and is owned by you, this endpoint returns a 200 response with that bucket + as if it had just been created. + + This endpoint is available for convenience. + It is recommended that instead you use the more fully-featured S3 API directly. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-object-storage-bucket + + :param acl: The Access Control Level of the bucket using a canned ACL string. + For more fine-grained control of ACLs, use the S3 API directly. + :type acl: str + Enum: private,public-read,authenticated-read,public-read-write + + :param cluster: The ID of the Object Storage Cluster where this bucket + should be created. + :type cluster: str + + :param endpoint_type: The type of s3_endpoint available to the active user in this region. 
+ :type endpoint_type: str + Enum: E0,E1,E2,E3 + + :param s3_endpoint: The active user's s3 endpoint URL, based on the endpoint_type and region. + :type s3_endpoint: str + + :param cors_enabled: If true, the bucket will be created with CORS enabled for + all origins. For more fine-grained controls of CORS, use + the S3 API directly. + :type cors_enabled: bool + + :param label: The name for this bucket. Must be unique in the cluster you are + creating the bucket in, or an error will be returned. Labels will + be reserved only for the cluster that active buckets are created + and stored in. If you want to reserve this bucketโ€™s label in + another cluster, you must create a new bucket with the same label + in the new cluster. + :type label: str + + :returns: A Object Storage Buckets that created by user. + :rtype: ObjectStorageBucket + """ + cluster_or_region_id = ( + cluster_or_region.id + if isinstance(cluster_or_region, ObjectStorageCluster) + else cluster_or_region + ) + + params = { + "label": label, + "acl": acl, + "cors_enabled": cors_enabled, + "s3_endpoint": s3_endpoint, + "endpoint_type": endpoint_type, + } + + if self.is_cluster(cluster_or_region_id): + warnings.warn( + "The cluster parameter has been deprecated for creating a object " + "storage bucket. Please consider switching to a region value. 
For " + "example, a cluster value of `us-mia-1` can be translated to a " + "region value of `us-mia`.", + DeprecationWarning, + ) + params["cluster"] = cluster_or_region_id + else: + params["region"] = cluster_or_region_id + + result = self.client.post("/object-storage/buckets", data=params) + + if not "label" in result or not "cluster" in result: + raise UnexpectedResponseError( + "Unexpected response when creating Object Storage Bucket!", + json=result, + ) + + return ObjectStorageBucket( + self.client, result["label"], result["cluster"], result + ) + + def object_acl_config(self, cluster_or_region_id: str, bucket, name=None): + return ObjectStorageBucket( + self.client, bucket, cluster_or_region_id + ).object_acl_config(name) + + def object_acl_config_update( + self, cluster_or_region_id, bucket, acl: ObjectStorageACL, name + ): + return ObjectStorageBucket( + self.client, bucket, cluster_or_region_id + ).object_acl_config_update(acl, name) + + def object_url_create( + self, + cluster_or_region_id, + bucket, + method, + name, + content_type=None, + expires_in=3600, + ): + """ + Creates a pre-signed URL to access a single Object in a bucket. + This can be used to share objects, and also to create/delete objects by using + the appropriate HTTP method in your request bodyโ€™s method parameter. + + This endpoint is available for convenience. + It is recommended that instead you use the more fully-featured S3 API directly. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-object-storage-object-url + + :param cluster_or_region_id: The ID of the cluster or region this bucket exists in. + :type cluster_or_region_id: str + + :param bucket: The bucket name. + :type bucket: str + + :param content_type: The expected Content-type header of the request this + signed URL will be valid for. If provided, the + Content-type header must be sent with the request when + this URL is used, and must be the same as it was when + the signed URL was created. 
+ Required for all methods except โ€œGETโ€ or โ€œDELETEโ€. + :type content_type: str + + :param expires_in: How long this signed URL will be valid for, in seconds. + If omitted, the URL will be valid for 3600 seconds (1 hour). Defaults to 3600. + :type expires_in: int 360..86400 + + :param method: The HTTP method allowed to be used with the pre-signed URL. + :type method: str + + :param name: The name of the object that will be accessed with the pre-signed + URL. This object need not exist, and no error will be returned + if it doesnโ€™t. This behavior is useful for generating pre-signed + URLs to upload new objects to by setting the method to โ€œPUTโ€. + :type name: str + + :returns: The signed URL to perform the request at. + :rtype: MappedObject + """ + if method not in ("GET", "DELETE") and content_type is None: + raise ValueError( + "Content-type header is missing for the current method! It's required for all methods except GET or DELETE." + ) + params = { + "method": method, + "name": name, + "expires_in": expires_in, + "content_type": content_type, + } + + result = self.client.post( + "/object-storage/buckets/{}/{}/object-url".format( + parse.quote(str(cluster_or_region_id)), parse.quote(str(bucket)) + ), + data=drop_null_keys(params), + ) + + if not "url" in result: + raise UnexpectedResponseError( + "Unexpected response when creating the access url of an object!", + json=result, + ) + + return MappedObject(**result) + + def quotas(self, *filters): + """ + Lists the active ObjectStorage-related quotas applied to your account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-quotas + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Object Storage Quotas that matched the query. 
+ :rtype: PaginatedList of ObjectStorageQuota + """ + return self.client._get_and_filter(ObjectStorageQuota, *filters) diff --git a/linode_api4/groups/placement.py b/linode_api4/groups/placement.py new file mode 100644 index 000000000..b1fa0f32b --- /dev/null +++ b/linode_api4/groups/placement.py @@ -0,0 +1,76 @@ +from typing import Union + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects.placement import ( + PlacementGroup, + PlacementGroupPolicy, + PlacementGroupType, +) +from linode_api4.objects.region import Region + + +class PlacementAPIGroup(Group): + def groups(self, *filters): + """ + NOTE: Placement Groups may not currently be available to all users. + + Returns a list of Placement Groups on your account. You may filter + this query to return only Placement Groups that match specific criteria:: + + groups = client.placement.groups(PlacementGroup.label == "test") + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-placement-groups + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Placement Groups that matched the query. + :rtype: PaginatedList of PlacementGroup + """ + return self.client._get_and_filter(PlacementGroup, *filters) + + def group_create( + self, + label: str, + region: Union[Region, str], + placement_group_type: PlacementGroupType, + placement_group_policy: PlacementGroupPolicy, + **kwargs, + ) -> PlacementGroup: + """ + NOTE: Placement Groups may not currently be available to all users. + + Create a placement group with the specified parameters. + + :param label: The label for the placement group. + :type label: str + :param region: The region where the placement group will be created. Can be either a Region object or a string representing the region ID. 
+ :type region: Union[Region, str] + :param placement_group_type: The type of the placement group. + :type placement_group_type: PlacementGroupType + :param placement_group_policy: The policy for assignments to this placement group. + :type placement_group_policy: PlacementGroupPolicy + + :returns: The new Placement Group. + :rtype: PlacementGroup + """ + params = { + "label": label, + "region": region.id if isinstance(region, Region) else region, + "placement_group_type": placement_group_type, + "placement_group_policy": placement_group_policy, + } + + params.update(kwargs) + + result = self.client.post("/placement/groups", data=params) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating Placement Group", json=result + ) + + d = PlacementGroup(self.client, result["id"], result) + return d diff --git a/linode_api4/groups/polling.py b/linode_api4/groups/polling.py new file mode 100644 index 000000000..8ef2c4feb --- /dev/null +++ b/linode_api4/groups/polling.py @@ -0,0 +1,91 @@ +from typing import Optional + +import polling + +from linode_api4.groups import Group +from linode_api4.polling import EventPoller, TimeoutContext + + +class PollingGroup(Group): + """ + This group contains various helper functions for polling on Linode events. + """ + + def event_poller_create( + self, + entity_type: str, + action: str, + entity_id: Optional[int] = None, + ) -> EventPoller: + """ + Creates a new instance of the EventPoller class. + + :param entity_type: The type of the entity to poll for events on. + Valid values for this field can be found here: https://techdocs.akamai.com/linode-api/reference/get-events + :type entity_type: str + :param action: The action that caused the Event to poll for. + Valid values for this field can be found here: https://techdocs.akamai.com/linode-api/reference/get-events + :type action: str + :param entity_id: The ID of the entity to poll for. 
+ :type entity_id: int + :param poll_interval: The interval in seconds to wait between polls. + :type poll_interval: int + + :returns: The new EventPoller object. + :rtype: EventPoller + """ + + return EventPoller( + self.client, + entity_type, + action, + entity_id=entity_id, + ) + + def wait_for_entity_free( + self, + entity_type: str, + entity_id: int, + timeout: int = 240, + interval: int = 5, + ): + """ + Waits for all relevant events to not be scheduled or in-progress. + + :param entity_type: The type of the entity to poll for events on. + Valid values for this field can be found here: https://techdocs.akamai.com/linode-api/reference/get-events + :type entity_type: str + :param entity_id: The ID of the entity to poll for. + :type entity_id: int + :param timeout: The timeout in seconds for this polling operation. + :type timeout: int + :param interval: The interval in seconds to wait between polls. + :type interval: int + """ + + timeout_ctx = TimeoutContext(timeout_seconds=timeout) + + api_filter = { + "+order": "desc", + "+order_by": "created", + "entity.id": entity_id, + "entity.type": entity_type, + } + + def poll_func(): + events = self.client.get("/account/events", filters=api_filter)[ + "data" + ] + return all( + event["status"] not in ("scheduled", "started") + for event in events + ) + + if poll_func(): + return + + polling.poll( + poll_func, + step=interval, + timeout=timeout_ctx.seconds_remaining, + ) diff --git a/linode_api4/groups/profile.py b/linode_api4/groups/profile.py new file mode 100644 index 000000000..ee583a1ac --- /dev/null +++ b/linode_api4/groups/profile.py @@ -0,0 +1,345 @@ +from datetime import datetime +from pathlib import Path + +from linode_api4 import UnexpectedResponseError +from linode_api4.common import SSH_KEY_TYPES +from linode_api4.groups import Group +from linode_api4.objects import ( + AuthorizedApp, + MappedObject, + PersonalAccessToken, + Profile, + ProfileLogin, + SSHKey, + TrustedDevice, +) + + +class
ProfileGroup(Group): + """ + Collections related to your user. + """ + + def __call__(self): + """ + Retrieve the acting user's Profile, containing information about the + current user such as their email address, username, and uid. This is + intended to be called off of a :any:`LinodeClient` object, like this:: + + profile = client.profile() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-profile + + :returns: The acting user's profile. + :rtype: Profile + """ + result = self.client.get("/profile") + + if not "username" in result: + raise UnexpectedResponseError( + "Unexpected response when getting profile!", json=result + ) + + p = Profile(self.client, result["username"], result) + return p + + def trusted_devices(self): + """ + Returns the Trusted Devices on your profile. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-devices + + :returns: A list of Trusted Devices for this profile. + :rtype: PaginatedList of TrustedDevice + """ + return self.client._get_and_filter(TrustedDevice) + + def user_preferences(self): + """ + View a list of user preferences tied to the OAuth client that generated the token making the request. + """ + + result = self.client.get( + "{}/preferences".format(Profile.api_endpoint), model=self + ) + + return MappedObject(**result) + + def security_questions(self): + """ + Returns a collection of security questions and their responses, if any, for your User Profile. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-security-questions + """ + + result = self.client.get( + "{}/security-questions".format(Profile.api_endpoint), model=self + ) + + return MappedObject(**result) + + def security_questions_answer(self, questions): + """ + Adds security question responses for your User. Requires exactly three unique questions. + Previous responses are overwritten if answered or reset to null if unanswered. 
+ + Example question: + { + "question_id": 11, + "response": "secret answer 3" + } + """ + + if len(questions) != 3: + raise ValueError("Exactly 3 security questions are required.") + + params = {"security_questions": questions} + + result = self.client.post( + "{}/security-questions".format(Profile.api_endpoint), + model=self, + data=params, + ) + + return MappedObject(**result) + + def user_preferences_update(self, **preferences): + """ + Updates a userโ€™s preferences. + """ + + result = self.client.put( + "{}/preferences".format(Profile.api_endpoint), + model=self, + data=preferences, + ) + + return MappedObject(**result) + + def phone_number_delete(self): + """ + Delete the verified phone number for the User making this request. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-profile-phone-number + + :returns: Returns True if the operation was successful. + :rtype: bool + """ + + resp = self.client.delete( + "{}/phone-number".format(Profile.api_endpoint), model=self + ) + + if "error" in resp: + raise UnexpectedResponseError( + "Unexpected response when deleting phone number!", + json=resp, + ) + + return True + + def phone_number_verify(self, otp_code): + """ + Verify a phone number by confirming the one-time code received via SMS message + after accessing the Phone Verification Code Send (POST /profile/phone-number) command. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-profile-phone-number-verify + + :param otp_code: The one-time code received via SMS message after accessing the Phone Verification Code Send + :type otp_code: str + + :returns: Returns True if the operation was successful. 
+ :rtype: bool + """ + + if not otp_code: + raise ValueError("OTP Code required to verify phone number.") + + params = {"otp_code": str(otp_code)} + + resp = self.client.post( + "{}/phone-number/verify".format(Profile.api_endpoint), + model=self, + data=params, + ) + + if "error" in resp: + raise UnexpectedResponseError( + "Unexpected response when verifying phone number!", + json=resp, + ) + + return True + + def phone_number_verification_code_send(self, iso_code, phone_number): + """ + Send a one-time verification code via SMS message to the submitted phone number. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-profile-phone-number + + :param iso_code: The two-letter ISO 3166 country code associated with the phone number. + :type iso_code: str + + :param phone_number: A valid phone number. + :type phone_number: str + + :returns: Returns True if the operation was successful. + :rtype: bool + """ + + if not iso_code: + raise ValueError("ISO Code required to send verification code.") + + if not phone_number: + raise ValueError("Phone Number required to send verification code.") + + params = {"iso_code": iso_code, "phone_number": phone_number} + + resp = self.client.post( + "{}/phone-number".format(Profile.api_endpoint), + model=self, + data=params, + ) + + if "error" in resp: + raise UnexpectedResponseError( + "Unexpected response when sending verification code!", + json=resp, + ) + + return True + + def logins(self): + """ + Returns the logins on your profile. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-profile-logins + + :returns: A list of logins for this profile. + :rtype: PaginatedList of ProfileLogin + """ + return self.client._get_and_filter(ProfileLogin) + + def tokens(self, *filters): + """ + Returns the Personal Access Tokens active for this user.
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-personal-access-tokens + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of tokens that matches the query. + :rtype: PaginatedList of PersonalAccessToken + """ + return self.client._get_and_filter(PersonalAccessToken, *filters) + + def token_create(self, label=None, expiry=None, scopes=None, **kwargs): + """ + Creates and returns a new Personal Access Token. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-personal-access-token + + :param label: The label of the new Personal Access Token. + :type label: str + :param expiry: When the new Personal Access Token will expire. + :type expiry: datetime or str + :param scopes: A space-separated list of OAuth scopes for this token. + :type scopes: str + + :returns: The new Personal Access Token. + :rtype: PersonalAccessToken + """ + if label: + kwargs["label"] = label + if expiry: + if isinstance(expiry, datetime): + expiry = datetime.strftime(expiry, "%Y-%m-%dT%H:%M:%S") + kwargs["expiry"] = expiry + if scopes: + kwargs["scopes"] = scopes + + result = self.client.post("/profile/tokens", data=kwargs) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating Personal Access Token!", + json=result, + ) + + token = PersonalAccessToken(self.client, result["id"], result) + return token + + def apps(self, *filters): + """ + Returns the Authorized Applications for this user. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-profile-apps + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering.
+ + :returns: A list of Authorized Applications for this user + :rtype: PaginatedList of AuthorizedApp + """ + return self.client._get_and_filter(AuthorizedApp, *filters) + + def ssh_keys(self, *filters): + """ + Returns the SSH Public Keys uploaded to your profile. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ssh-keys + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of SSH Keys for this profile. + :rtype: PaginatedList of SSHKey + """ + return self.client._get_and_filter(SSHKey, *filters) + + def ssh_key_upload(self, key, label): + """ + Uploads a new SSH Public Key to your profile This key can be used in + later Linode deployments. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-add-ssh-key + + :param key: The ssh key, or a path to the ssh key. If a path is provided, + the file at the path must exist and be readable or an exception + will be thrown. + :type key: str + :param label: The name to give this key. This is purely aesthetic. + :type label: str + + :returns: The newly uploaded SSH Key + :rtype: SSHKey + :raises ValueError: If the key provided does not appear to be valid, and + does not appear to be a path to a valid key. 
+ """ + if not key.startswith(SSH_KEY_TYPES): + # this might be a file path - look for it + key_path = Path(key).expanduser() + if key_path.is_file(): + with open(key_path) as f: + key = f.read().strip() + if not key.startswith(SSH_KEY_TYPES): + raise ValueError("Invalid SSH Public Key") + + params = { + "ssh_key": key, + "label": label, + } + + result = self.client.post("/profile/sshkeys", data=params) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when uploading SSH Key!", json=result + ) + + ssh_key = SSHKey(self.client, result["id"], result) + return ssh_key diff --git a/linode_api4/groups/region.py b/linode_api4/groups/region.py new file mode 100644 index 000000000..54bb37f0d --- /dev/null +++ b/linode_api4/groups/region.py @@ -0,0 +1,79 @@ +from linode_api4.groups import Group +from linode_api4.objects import Region +from linode_api4.objects.region import ( + RegionAvailabilityEntry, + RegionVPCAvailability, +) + + +class RegionGroup(Group): + def __call__(self, *filters): + """ + Returns the available Regions for Linode products. + + This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + region = client.regions() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-regions + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of available Regions. + :rtype: PaginatedList of Region + """ + + return self.client._get_and_filter(Region, *filters) + + def availability(self, *filters): + """ + Returns the availability of Linode plans within a Region. + + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-regions-availability + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of entries describing the availability of a plan in a region. 
+ :rtype: PaginatedList of RegionAvailabilityEntry + """ + + return self.client._get_and_filter( + RegionAvailabilityEntry, *filters, endpoint="/regions/availability" + ) + + def vpc_availability(self, *filters): + """ + Returns VPC availability data for all regions. + + NOTE: IPv6 VPCs may not currently be available to all users. + + This endpoint supports pagination with the following parameters: + - page: Page number (>= 1) + - page_size: Number of items per page (25-500) + + Pagination is handled automatically by PaginatedList. To configure page_size, + set it when creating the LinodeClient: + + client = LinodeClient(token, page_size=100) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-regions-vpc-availability + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of VPC availability data for regions. + :rtype: PaginatedList of RegionVPCAvailability + """ + + return self.client._get_and_filter( + RegionVPCAvailability, + *filters, + endpoint="/regions/vpc-availability", + ) diff --git a/linode_api4/groups/support.py b/linode_api4/groups/support.py new file mode 100644 index 000000000..ccc0b154d --- /dev/null +++ b/linode_api4/groups/support.py @@ -0,0 +1,105 @@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import ( + VLAN, + Database, + Domain, + Firewall, + Instance, + LKECluster, + LongviewClient, + NodeBalancer, + SupportTicket, + Volume, +) + + +class SupportGroup(Group): + """ + Collections related to support tickets. + """ + + def tickets(self, *filters): + """ + Returns a list of support tickets on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-tickets + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. 
+ + :returns: A list of support tickets on this account. + :rtype: PaginatedList of SupportTicket + """ + + return self.client._get_and_filter(SupportTicket, *filters) + + def ticket_open( + self, + summary, + description, + managed_issue=False, + regarding=None, + **kwargs, + ): + """ + Opens a support ticket on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-ticket + + :param summary: The summary or title for this support ticket. + :type summary: str + :param description: The full details of the issue or question. + :type description: str + :param regarding: The resource being referred to in this ticket. + :type regarding: + :param managed_issue: Designates if this ticket relates to a managed service. + :type managed_issue: bool + + :returns: The new support ticket. + :rtype: SupportTicket + """ + params = { + "summary": summary, + "description": description, + "managed_issue": managed_issue, + } + + type_to_id = { + Instance: "linode_id", + Domain: "domain_id", + NodeBalancer: "nodebalancer_id", + Volume: "volume_id", + Firewall: "firewall_id", + LKECluster: "lkecluster_id", + Database: "database_id", + LongviewClient: "longviewclient_id", + } + + params.update(kwargs) + + if regarding: + id_attr = type_to_id.get(type(regarding)) + + if id_attr is not None: + params[id_attr] = regarding.id + elif isinstance(regarding, VLAN): + params["vlan"] = regarding.label + params["region"] = regarding.region + else: + raise ValueError( + "Cannot open ticket regarding type {}!".format( + type(regarding) + ) + ) + + result = self.client.post("/support/tickets", data=params) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating ticket!", json=result + ) + + t = SupportTicket(self.client, result["id"], result) + return t diff --git a/linode_api4/groups/tag.py b/linode_api4/groups/tag.py new file mode 100644 index 000000000..5948b513b --- /dev/null +++ b/linode_api4/groups/tag.py @@ -0,0 +1,116 
@@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import Domain, Instance, NodeBalancer, Tag, Volume + + +class TagGroup(Group): + def __call__(self, *filters): + """ + Retrieves the Tags on your account. This may only be attempted by + unrestricted users. + + This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + tags = client.tags() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-domain + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Tags on the account. + :rtype: PaginatedList of Tag + """ + return self.client._get_and_filter(Tag, *filters) + + def create( + self, + label, + instances=None, + domains=None, + nodebalancers=None, + volumes=None, + entities=[], + ): + """ + Creates a new Tag and optionally applies it to the given entities. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-tags + + :param label: The label for the new Tag + :type label: str + :param entities: A list of objects to apply this Tag to upon creation. + May only be taggable types (Linode Instances, Domains, + NodeBalancers, or Volumes). These are applied *in addition + to* any IDs specified with ``instances``, ``domains``, + ``nodebalancers``, or ``volumes``, and is a convenience + for sending multiple entity types without sorting them + yourself. 
+ :type entities: list of Instance, Domain, NodeBalancer, and/or Volume + :param instances: A list of Linode Instances to apply this Tag to upon + creation + :type instances: list of Instance or list of int + :param domains: A list of Domains to apply this Tag to upon + creation + :type domains: list of Domain or list of int + :param nodebalancers: A list of NodeBalancers to apply this Tag to upon + creation + :type nodebalancers: list of NodeBalancer or list of int + :param volumes: A list of Volumes to apply this Tag to upon + creation + :type volumes: list of Volumes or list of int + + :returns: The new Tag + :rtype: Tag + """ + linode_ids, nodebalancer_ids, domain_ids, volume_ids = [], [], [], [] + + # filter input into lists of ids + sorter = zip( + (linode_ids, nodebalancer_ids, domain_ids, volume_ids), + (instances, nodebalancers, domains, volumes), + ) + + for id_list, input_list in sorter: + # if we got something, we need to find its ID + if input_list is not None: + for cur in input_list: + if isinstance(cur, int): + id_list.append(cur) + else: + id_list.append(cur.id) + + # filter entities into id lists too + type_map = { + Instance: linode_ids, + NodeBalancer: nodebalancer_ids, + Domain: domain_ids, + Volume: volume_ids, + } + + for e in entities: + if type(e) in type_map: + type_map[type(e)].append(e.id) + else: + raise ValueError("Unsupported entity type {}".format(type(e))) + + # finally, omit all id lists that are empty + params = { + "label": label, + "linodes": linode_ids or None, + "nodebalancers": nodebalancer_ids or None, + "domains": domain_ids or None, + "volumes": volume_ids or None, + } + + result = self.client.post("/tags", data=params) + + if not "label" in result: + raise UnexpectedResponseError( + "Unexpected response when creating Tag!", json=result + ) + + t = Tag(self.client, result["label"], result) + return t diff --git a/linode_api4/groups/volume.py b/linode_api4/groups/volume.py new file mode 100644 index 000000000..39d0aeaaa --- 
/dev/null +++ b/linode_api4/groups/volume.py @@ -0,0 +1,95 @@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import Volume, VolumeType +from linode_api4.objects.base import _flatten_request_body_recursive + + +class VolumeGroup(Group): + def __call__(self, *filters): + """ + Retrieves the Block Storage Volumes your user has access to. + + This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + volumes = client.volumes() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-volumes + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Volumes the acting user can access. + :rtype: PaginatedList of Volume + """ + return self.client._get_and_filter(Volume, *filters) + + def create(self, label, region=None, linode=None, size=20, **kwargs): + """ + Creates a new Block Storage Volume, either in the given Region or + attached to the given Instance. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-volumes + + :param label: The label for the new Volume. + :type label: str + :param region: The Region to create this Volume in. Not required if + `linode` is provided. + :type region: Region or str + :param linode: The Instance to attach this Volume to. If not given, the + new Volume will not be attached to anything. + :type linode: Instance or int + :param size: The size, in GB, of the new Volume. Defaults to 20. + :type size: int + :param tags: A list of tags to apply to the new volume. If any of the + tags included do not exist, they will be created as part of + this operation. + :type tags: list[str] + :param encryption: Whether the new Volume should opt in or out of disk encryption. + :type encryption: str + Note: Block Storage Disk Encryption is not currently available to all users. + :returns: The new Volume. 
+ :rtype: Volume + """ + if not (region or linode): + raise ValueError("region or linode required!") + + params = { + "label": label, + "size": size, + "region": region, + "linode_id": linode, + } + params.update(kwargs) + + result = self.client.post( + "/volumes", + data=_flatten_request_body_recursive(params), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating volume!", json=result + ) + + v = Volume(self.client, result["id"], result) + return v + + def types(self, *filters): + """ + Returns a :any:`PaginatedList` of :any:`VolumeType` objects that represents a valid Volume type. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-volume-types + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A Paginated List of Volume types that match the query. + :rtype: PaginatedList of VolumeType + """ + + return self.client._get_and_filter( + VolumeType, *filters, endpoint="/volumes/types" + ) diff --git a/linode_api4/groups/vpc.py b/linode_api4/groups/vpc.py new file mode 100644 index 000000000..eda931292 --- /dev/null +++ b/linode_api4/groups/vpc.py @@ -0,0 +1,110 @@ +from typing import Any, Dict, List, Optional, Union + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.groups import Group +from linode_api4.objects import VPC, Region, VPCIPAddress, VPCIPv6RangeOptions +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.paginated_list import PaginatedList +from linode_api4.util import drop_null_keys + + +class VPCGroup(Group): + def __call__(self, *filters) -> PaginatedList: + """ + Retrieves all of the VPCs the acting user has access to. 
+ + This is intended to be called off of the :any:`LinodeClient` + class, like this:: + + vpcs = client.vpcs() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-vpcs + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of VPC the acting user can access. + :rtype: PaginatedList of VPC + """ + return self.client._get_and_filter(VPC, *filters) + + def create( + self, + label: str, + region: Union[Region, str], + description: Optional[str] = None, + subnets: Optional[List[Dict[str, Any]]] = None, + ipv6: Optional[List[Union[VPCIPv6RangeOptions, Dict[str, Any]]]] = None, + **kwargs, + ) -> VPC: + """ + Creates a new VPC under your Linode account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-vpc + + :param label: The label of the newly created VPC. + :type label: str + :param region: The region of the newly created VPC. + :type region: Union[Region, str] + :param description: The user-defined description of this VPC. + :type description: Optional[str] + :param subnets: A list of subnets to create under this VPC. + :type subnets: List[Dict[str, Any]] + :param ipv6: The IPv6 address ranges for this VPC. + :type ipv6: List[Union[VPCIPv6RangeOptions, Dict[str, Any]]] + + :returns: The new VPC object. 
+ :rtype: VPC + """ + params = { + "label": label, + "region": region.id if isinstance(region, Region) else region, + "description": description, + "ipv6": ipv6, + "subnets": subnets, + } + + if subnets is not None and len(subnets) > 0: + for subnet in subnets: + if not isinstance(subnet, dict): + raise ValueError( + f"Unsupported type for subnet: {type(subnet)}" + ) + + params.update(kwargs) + + result = self.client.post( + "/vpcs", + data=drop_null_keys(_flatten_request_body_recursive(params)), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating VPC", json=result + ) + + d = VPC(self.client, result["id"], result) + return d + + def ips(self, *filters) -> PaginatedList: + """ + Retrieves all of the VPC IP addresses for the current account matching the given filters. + + This is intended to be called from the :any:`LinodeClient` + class, like this:: + + vpc_ips = client.vpcs.ips() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-vpcs-ips + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of VPCIPAddresses the acting user can access. 
+ :rtype: PaginatedList of VPCIPAddress + """ + return self.client._get_and_filter( + VPCIPAddress, *filters, endpoint="/vpcs/ips" + ) diff --git a/linode_api4/linode_client.py b/linode_api4/linode_client.py new file mode 100644 index 000000000..0e89142b3 --- /dev/null +++ b/linode_api4/linode_client.py @@ -0,0 +1,622 @@ +from __future__ import annotations + +import json +import logging +from importlib.metadata import version +from typing import BinaryIO, List, Optional, Tuple +from urllib import parse + +import requests +from requests.adapters import HTTPAdapter, Retry + +from linode_api4.errors import ApiError, UnexpectedResponseError +from linode_api4.groups import ( + AccountGroup, + BetaProgramGroup, + DatabaseGroup, + DomainGroup, + ImageGroup, + ImageShareGroupAPIGroup, + LinodeGroup, + LKEGroup, + LockGroup, + LongviewGroup, + MaintenanceGroup, + MetricsGroup, + MonitorGroup, + NetworkingGroup, + NodeBalancerGroup, + ObjectStorageGroup, + PollingGroup, + ProfileGroup, + RegionGroup, + SupportGroup, + TagGroup, + VolumeGroup, + VPCGroup, +) +from linode_api4.objects import Image, and_ + +from .groups.placement import PlacementAPIGroup +from .paginated_list import PaginatedList + +package_version = version("linode_api4") + +logger = logging.getLogger(__name__) + + +class LinearRetry(Retry): + """ + Linear retry is a subclass of Retry that uses a linear backoff strategy. + This is necessary to maintain backwards compatibility with the old retry system. + """ + + def get_backoff_time(self): + return self.backoff_factor + + +class BaseClient: + """ + The base class for a client. + + :param token: The authentication token to use for communication with the + API. Can be either a Personal Access Token or an OAuth Token. + :type token: str + :param base_url: The base URL for API requests. Generally, you shouldn't + change this. + :type base_url: str + :param user_agent: What to append to the User Agent of all requests made + by this client. 
Setting this allows Linode's internal
+                       monitoring applications to track the usage of your
+                       application. Setting this is not necessary, but some
+                       applications may desire this behavior.
+    :type user_agent: str
+    :param page_size: The default size to request pages at. If not given,
+                      the API's default page size is used. Valid values
+                      can be found in the API docs, but at time of writing
+                      are between 25 and 500.
+    :type page_size: int
+    :param retry: Whether API requests should automatically be retried on known
+                  intermittent responses.
+    :type retry: bool
+    :param retry_rate_limit_interval: The amount of time to wait between HTTP request
+                                      retries.
+    :type retry_rate_limit_interval: Union[float, int]
+    :param retry_max: The number of request retries that should be attempted before
+                      raising an API error.
+    :type retry_max: int
+    :type retry_statuses: List of int
+    :param retry_statuses: Additional HTTP response statuses to retry on.
+                           By default, the client will retry on 408, 429, and 502
+                           responses.
+    :param ca_path: The path to a CA file to use for API requests in this client.
+ :type ca_path: str + """ + + def __init__( + self, + token, + base_url, + user_agent=None, + page_size=None, + retry=True, + retry_rate_limit_interval=1.0, + retry_max=5, + retry_statuses=None, + ca_path=None, + ): + self.base_url = base_url + self._add_user_agent = user_agent + self.token = token + self.page_size = page_size + self.ca_path = ca_path + + retry_forcelist = [408, 429, 502] + + if retry_statuses is not None: + retry_forcelist.extend(retry_statuses) + + # Ensure the max retries value is valid + if not isinstance(retry_max, int): + raise ValueError("retry_max must be an int") + + self.retry = retry + self.retry_rate_limit_interval = float(retry_rate_limit_interval) + self.retry_max = retry_max + self.retry_statuses = retry_forcelist + + # Initialize the HTTP client session + self.session = requests.Session() + + self._retry_config = LinearRetry( + total=retry_max if retry else 0, + status_forcelist=self.retry_statuses, + respect_retry_after_header=True, + backoff_factor=self.retry_rate_limit_interval, + raise_on_status=False, + # By default, POST is not an allowed method. + # We should explicitly include it. + allowed_methods={"DELETE", "GET", "POST", "PUT"}, + ) + retry_adapter = HTTPAdapter(max_retries=self._retry_config) + + self.session.mount("http://", retry_adapter) + self.session.mount("https://", retry_adapter) + + @property + def _user_agent(self): + return "{}python-linode_api4/{} {}".format( + "{} ".format(self._add_user_agent) if self._add_user_agent else "", + package_version, + requests.utils.default_user_agent(), + ) + + def load(self, target_type, target_id, target_parent_id=None): + """ + Constructs and immediately loads the object, circumventing the + lazy-loading scheme by immediately making an API request. Does not + load related objects. 
+ + For example, if you wanted to load an :any:`Instance` object with ID 123, + you could do this:: + + loaded_linode = client.load(Instance, 123) + + Similarly, if you instead wanted to load a :any:`NodeBalancerConfig`, + you could do so like this:: + + loaded_nodebalancer_config = client.load(NodeBalancerConfig, 456, 432) + + :param target_type: The type of object to create. + :type target_type: type + :param target_id: The ID of the object to create. + :type target_id: int or str + :param target_parent_id: The parent ID of the object to create, if + applicable. + :type target_parent_id: int, str, or None + + :returns: The resulting object, fully loaded. + :rtype: target_type + :raise ApiError: if the requested object could not be loaded. + """ + result = target_type.make_instance( + target_id, self, parent_id=target_parent_id + ) + result._api_get() + + return result + + def _api_call( + self, endpoint, model=None, method=None, data=None, filters=None + ): + """ + Makes a call to the linode api. 
Data should only be given if the method is + POST or PUT, and should be a dictionary + """ + if not self.token: + raise RuntimeError("You do not have an API token!") + + if not method: + raise ValueError("Method is required for API calls!") + + if model: + endpoint = endpoint.format( + **{k: parse.quote(str(v)) for k, v in vars(model).items()} + ) + + url = "{}{}".format(self.base_url, endpoint) + headers = { + "Authorization": "Bearer {}".format(self.token), + "Content-Type": "application/json", + "User-Agent": self._user_agent, + } + + if filters: + headers["X-Filter"] = json.dumps(filters) + + body = None + if data is not None: + body = json.dumps(data) + + response = method( + url, + headers=headers, + data=body, + verify=self.ca_path or self.session.verify, + ) + + warning = response.headers.get("Warning", None) + if warning: + logger.warning("Received warning from server: {}".format(warning)) + + api_error = ApiError.from_response(response) + if api_error is not None: + raise api_error + + if response.status_code != 204: + j = response.json() + else: + j = None # handle no response body + + return j + + def _get_objects( + self, endpoint, cls, model=None, parent_id=None, filters=None + ): + # handle non-default page sizes + call_endpoint = endpoint + if self.page_size is not None: + call_endpoint += "?page_size={}".format(self.page_size) + + response_json = self.get(call_endpoint, model=model, filters=filters) + + if not "data" in response_json: + raise UnexpectedResponseError( + "Problem with response!", json=response_json + ) + + if "pages" in response_json: + formatted_endpoint = endpoint + if model: + formatted_endpoint = formatted_endpoint.format(**vars(model)) + return PaginatedList.make_paginated_list( + response_json, + self, + cls, + parent_id=parent_id, + page_url=formatted_endpoint[1:], + filters=filters, + ) + return PaginatedList.make_list( + response_json["data"], self, cls, parent_id=parent_id + ) + + def get(self, *args, **kwargs): + return 
self._api_call(*args, method=self.session.get, **kwargs) + + def post(self, *args, **kwargs): + return self._api_call(*args, method=self.session.post, **kwargs) + + def put(self, *args, **kwargs): + return self._api_call(*args, method=self.session.put, **kwargs) + + def delete(self, *args, **kwargs): + return self._api_call(*args, method=self.session.delete, **kwargs) + + def __setattr__(self, key, value): + # Allow for dynamic updating of the retry config + handlers = { + "retry_rate_limit_interval": lambda: setattr( + self._retry_config, "backoff_factor", value + ), + "retry": lambda: setattr( + self._retry_config, "total", self.retry_max if value else 0 + ), + "retry_max": lambda: setattr( + self._retry_config, "total", value if self.retry else 0 + ), + "retry_statuses": lambda: setattr( + self._retry_config, "status_forcelist", value + ), + } + + handler = handlers.get(key) + if hasattr(self, "_retry_config") and handler is not None: + handler() + + super().__setattr__(key, value) + + # helper functions + def _get_and_filter( + self, + obj_type, + *filters, + endpoint=None, + parent_id=None, + ): + parsed_filters = None + if filters: + if len(filters) > 1: + parsed_filters = and_( + *filters + ).dct # pylint: disable=no-value-for-parameter + else: + parsed_filters = filters[0].dct + + # Use sepcified endpoint + if endpoint: + return self._get_objects( + endpoint, obj_type, parent_id=parent_id, filters=parsed_filters + ) + else: + return self._get_objects( + obj_type.api_list(), + obj_type, + parent_id=parent_id, + filters=parsed_filters, + ) + + +class LinodeClient(BaseClient): + def __init__( + self, + token, + base_url="https://api.linode.com/v4", + user_agent=None, + page_size=None, + retry=True, + retry_rate_limit_interval=1.0, + retry_max=5, + retry_statuses=None, + ca_path=None, + ): + """ + The main interface to the Linode API. + + :param token: The authentication token to use for communication with the + API. 
Can be either a Personal Access Token or an OAuth Token.
+        :type token: str
+        :param base_url: The base URL for API requests. Generally, you shouldn't
+                         change this.
+        :type base_url: str
+        :param user_agent: What to append to the User Agent of all requests made
+                           by this client. Setting this allows Linode's internal
+                           monitoring applications to track the usage of your
+                           application. Setting this is not necessary, but some
+                           applications may desire this behavior.
+        :type user_agent: str
+        :param page_size: The default size to request pages at. If not given,
+                          the API's default page size is used. Valid values
+                          can be found in the API docs, but at time of writing
+                          are between 25 and 500.
+        :type page_size: int
+        :param retry: Whether API requests should automatically be retried on known
+                      intermittent responses.
+        :type retry: bool
+        :param retry_rate_limit_interval: The amount of time to wait between HTTP request
+                                          retries.
+        :type retry_rate_limit_interval: Union[float, int]
+        :param retry_max: The number of request retries that should be attempted before
+                          raising an API error.
+        :type retry_max: int
+        :type retry_statuses: List of int
+        :param retry_statuses: Additional HTTP response statuses to retry on.
+                               By default, the client will retry on 408, 429, and 502
+                               responses.
+        :param ca_path: The path to a CA file to use for API requests in this client.
+ :type ca_path: str + """ + #: Access methods related to Linodes - see :any:`LinodeGroup` for + #: more information + self.linode = LinodeGroup(self) + + #: Access methods related to your user - see :any:`ProfileGroup` for + #: more information + self.profile = ProfileGroup(self) + + #: Access methods related to your account - see :any:`AccountGroup` for + #: more information + self.account = AccountGroup(self) + + #: Access methods related to networking on your account - see + #: :any:`NetworkingGroup` for more information + self.networking = NetworkingGroup(self) + + #: Access methods related to maintenance on your account - see + #: :any:`MaintenanceGroup` for more information + self.maintenance = MaintenanceGroup(self) + + #: Access methods related to support - see :any:`SupportGroup` for more + #: information + self.support = SupportGroup(self) + + #: Access information related to the Longview service - see + #: :any:`LongviewGroup` for more information + self.longview = LongviewGroup(self) + + #: Access methods related to Object Storage - see :any:`ObjectStorageGroup` + #: for more information + self.object_storage = ObjectStorageGroup(self) + + #: Access methods related to LKE - see :any:`LKEGroup` for more information. + self.lke = LKEGroup(self) + + #: Access methods related to Managed Databases - see :any:`DatabaseGroup` for more information. + self.database = DatabaseGroup(self) + + #: Access methods related to NodeBalancers - see :any:`NodeBalancerGroup` for more information. + self.nodebalancers = NodeBalancerGroup(self) + + #: Access methods related to Domains - see :any:`DomainGroup` for more information. + self.domains = DomainGroup(self) + + #: Access methods related to Tags - See :any:`TagGroup` for more information. + self.tags = TagGroup(self) + + #: Access methods related to Volumes - See :any:`VolumeGroup` for more information. 
+ self.volumes = VolumeGroup(self) + + #: Access methods related to Regions - See :any:`RegionGroup` for more information. + self.regions = RegionGroup(self) + + #: Access methods related to Images - See :any:`ImageGroup` for more information. + self.images = ImageGroup(self) + + #: Access methods related to Image Share Groups - See :any:`ImageShareGroupAPIGroup` for more information. + self.sharegroups = ImageShareGroupAPIGroup(self) + + #: Access methods related to VPCs - See :any:`VPCGroup` for more information. + self.vpcs = VPCGroup(self) + + #: Access methods related to Event polling - See :any:`PollingGroup` for more information. + self.polling = PollingGroup(self) + + #: Access methods related to Beta Program - See :any:`BetaProgramGroup` for more information. + self.beta = BetaProgramGroup(self) + + #: Access methods related to VM placement - See :any:`PlacementAPIGroup` for more information. + self.placement = PlacementAPIGroup(self) + + self.monitor = MonitorGroup(self) + + #: Access methods related to Resource Locks - See :any:`LockGroup` for more information. + self.locks = LockGroup(self) + + super().__init__( + token=token, + base_url=base_url, + user_agent=user_agent, + page_size=page_size, + retry=retry, + retry_rate_limit_interval=retry_rate_limit_interval, + retry_max=retry_max, + retry_statuses=retry_statuses, + ca_path=ca_path, + ) + + def image_create(self, disk, label=None, description=None, tags=None): + """ + .. note:: This method is an alias to maintain backwards compatibility. + Please use :meth:`LinodeClient.images.create(...) <.ImageGroup.create>` for all new projects. + """ + return self.images.create( + disk, label=label, description=description, tags=tags + ) + + def image_create_upload( + self, + label: str, + region: str, + description: Optional[str] = None, + tags: Optional[List[str]] = None, + ) -> Tuple[Image, str]: + """ + .. note:: This method is an alias to maintain backwards compatibility. 
+ Please use :meth:`LinodeClient.images.create_upload(...) <.ImageGroup.create_upload>` + for all new projects. + """ + + return self.images.create_upload( + label, region, description=description, tags=tags + ) + + def image_upload( + self, + label: str, + region: str, + file: BinaryIO, + description: Optional[str] = None, + tags: Optional[List[str]] = None, + ) -> Image: + """ + .. note:: This method is an alias to maintain backwards compatibility. + Please use :meth:`LinodeClient.images.upload(...) <.ImageGroup.upload>` for all new projects. + """ + return self.images.upload( + label, region, file, description=description, tags=tags + ) + + def nodebalancer_create(self, region, **kwargs): + """ + .. note:: This method is an alias to maintain backwards compatibility. + Please use + :meth:`LinodeClient.nodebalancers.create(...) <.NodeBalancerGroup.create>` + for all new projects. + """ + return self.nodebalancers.create(region, **kwargs) + + def domain_create(self, domain, master=True, **kwargs): + """ + .. note:: This method is an alias to maintain backwards compatibility. + Please use :meth:`LinodeClient.domains.create(...) <.DomainGroup.create>` for all + new projects. + """ + return self.domains.create(domain, master=master, **kwargs) + + def tag_create( + self, + label, + instances=None, + domains=None, + nodebalancers=None, + volumes=None, + entities=[], + ): + """ + .. note:: This method is an alias to maintain backwards compatibility. + Please use :meth:`LinodeClient.tags.create(...) <.TagGroup.create>` for all new projects. + """ + return self.tags.create( + label, + instances=instances, + domains=domains, + nodebalancers=nodebalancers, + volumes=volumes, + entities=entities, + ) + + def volume_create(self, label, region=None, linode=None, size=20, **kwargs): + """ + .. note:: This method is an alias to maintain backwards compatibility. + Please use :meth:`LinodeClient.volumes.create(...) <.VolumeGroup.create>` for all new projects. 
+ """ + return self.volumes.create( + label, region=region, linode=linode, size=size, **kwargs + ) + + +class MonitorClient(BaseClient): + """ + The main interface to the Monitor API. + + :param token: The authentication Personal Access Token token to use for + communication with the API. You may want to generate one using + Linode Client. For example: + linode_client.monitor.create_token( + service_type="dbaas", entity_ids=[entity_id] + ) + :type token: str + :param base_url: The base URL for monitor API requests. Generally, you shouldn't + change this. + :type base_url: str + :param user_agent: What to append to the User Agent of all requests made + by this client. Setting this allows Linode's internal + monitoring applications to track the usage of your + application. Setting this is not necessary, but some + applications may desire this behavior. + :type user_agent: str + :param page_size: The default size to request pages at. If not given, + the API's default page size is used. Valid values + can be found in the API docs. + :type page_size: int + :param ca_path: The path to a CA file to use for API requests in this client. 
+ :type ca_path: str + """ + + def __init__( + self, + token, + base_url="https://monitor-api.linode.com/v2beta", + user_agent=None, + page_size=None, + ca_path=None, + retry=True, + retry_rate_limit_interval=1.0, + retry_max=5, + retry_statuses=None, + ): + #: Access methods related to your monitor metrics - see :any:`MetricsGroup` for + #: more information + self.metrics = MetricsGroup(self) + + super().__init__( + token=token, + base_url=base_url, + user_agent=user_agent, + page_size=page_size, + retry=retry, + retry_rate_limit_interval=retry_rate_limit_interval, + retry_max=retry_max, + retry_statuses=retry_statuses, + ca_path=ca_path, + ) diff --git a/linode_api4/login_client.py b/linode_api4/login_client.py new file mode 100644 index 000000000..e21c5c4b2 --- /dev/null +++ b/linode_api4/login_client.py @@ -0,0 +1,519 @@ +import re +from datetime import datetime, timedelta +from enum import Enum + +import requests + +from linode_api4.errors import ApiError + +try: + from urllib.parse import urlencode, urlparse, urlunparse +except ImportError: + from urllib import urlencode + + from urlparse import urlparse, urlunparse + + +class AllWrapper: + def __repr__(self): + return "*" + + +class OAuthScopes: + """ + Represents the OAuth Scopes available to an application. In general, an + application should request no more scopes than it requires. This class + should be treated like a Enum, and used as follows:: + + required_scopes = [OAuthScopes.Linodes.all, OAuthScopes.Domains.read_only] + + Lists of OAuth Scopes are accepted when calling the :any:`generate_login_url` + method of the :any:`LinodeLoginClient`. + + All contained enumerations of OAuth Scopes have two levels, "read_only" and + "read_write". "read_only" access grants you the ability to get resources and + of that type, but not to change, create, or delete them. "read_write" access + allows to full access to resources of the requested type. 
In the above + example, you are requesting access to view, modify, create, and delete + Linodes, and to view Domains. + """ + + #: If necessary, an application may request all scopes by using OAuthScopes.all + all = AllWrapper() + + class Linodes(Enum): + """ + Access to Linodes + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "linodes:*" + return "linodes:{}".format(self.name) + + class Domains(Enum): + """ + Access to Domains + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "domains:*" + return "domains:{}".format(self.name) + + class StackScripts(Enum): + """ + Access to private StackScripts + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "stackscripts:*" + return "stackscripts:{}".format(self.name) + + class Users(Enum): + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "users:*" + return "users:{}".format(self.name) + + class NodeBalancers(Enum): + """ + Access to NodeBalancers + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "nodebalancers:*" + return "nodebalancers:{}".format(self.name) + + class Tokens(Enum): + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "tokens:*" + return "tokens:{}".format(self.name) + + class IPs(Enum): + """ + Access to IPs and networking managements + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "ips:*" + return "ips:{}".format(self.name) + + class Firewalls(Enum): + """ + Access to Firewalls + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "firewall:*" + return "firewall:{}".format(self.name) + + class Tickets(Enum): + """ + Access to view, open, and respond to Support 
Tickets + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "tickets:*" + return "tickets:{}".format(self.name) + + class Clients(Enum): + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "clients:*" + return "clients:{}".format(self.name) + + class Account(Enum): + """ + Access to the user's account, including billing information, tokens + management, user management, etc. + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "account:*" + return "account:{}".format(self.name) + + class Events(Enum): + """ + Access to a user's Events + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "events:*" + return "events:{}".format(self.name) + + class Volumes(Enum): + """ + Access to Block Storage Volumes + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "volumes:*" + return "volumes:{}".format(self.name) + + class LKE(Enum): + """ + Access to LKE Endpoint + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "lke:*" + return "lke:{}".format(self.name) + + class ObjectStorage(Enum): + """ + Access to Object Storage + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "object_storage:*" + return "object_storage:{}".format(self.name) + + class Longview(Enum): + """ + Access to Longview + """ + + read_only = 0 + read_write = 1 + all = 2 + + def __repr__(self): + if self.name == "all": + return "longview:*" + return "longview:{}".format(self.name) + + _scope_families = { + "linodes": Linodes, + "domains": Domains, + "stackscripts": StackScripts, + "users": Users, + "tokens": Tokens, + "ips": IPs, + "firewall": Firewalls, + "tickets": Tickets, + "clients": Clients, + "account": Account, + 
"events": Events, + "volumes": Volumes, + "lke": LKE, + "object_storage": ObjectStorage, + "nodebalancers": NodeBalancers, + "longview": Longview, + } + + @staticmethod + def parse(scopes): + ret = [] + + # special all-scope case + if scopes == "*": + return [ + getattr(scope, "all") + for scope in OAuthScopes._scope_families.values() + ] + + for scope in re.split("[, ]", scopes): + resource = access = None + if ":" in scope: + resource, access = scope.split(":") + else: + resource = scope + access = "*" + + parsed_scope = OAuthScopes._get_parsed_scope(resource, access) + if parsed_scope: + ret.append(parsed_scope) + + return ret + + @staticmethod + def _get_parsed_scope(resource, access): + resource = resource.lower() + access = access.lower() + if resource in OAuthScopes._scope_families: + if access == "*": + access = "all" + if hasattr(OAuthScopes._scope_families[resource], access): + return getattr(OAuthScopes._scope_families[resource], access) + + return None + + @staticmethod + def serialize(scopes): + ret = "" + if not type(scopes) is list: + scopes = [scopes] + for scope in scopes: + ret += "{},".format(repr(scope)) + if ret: + ret = ret[:-1] + return ret + + +class LinodeLoginClient: + def __init__( + self, + client_id, + client_secret, + base_url="https://login.linode.com", + ca_path=None, + ): + """ + Create a new LinodeLoginClient. These clients do not make any requests + on creation, and can safely be created and thrown away as needed. + + For complete usage information, see the :doc:`OAuth guide<../guides/oauth>`. + + :param client_id: The OAuth Client ID for this client. + :type client_id: str + :param client_secret: The OAuth Client Secret for this client. + :type client_secret: str + :param base_url: The URL for Linode's OAuth server. This should not be + changed. + :type base_url: str + :param ca_path: The path to the CA file to use for requests run by this client. 
+        :type ca_path: str
+        """
+        self.base_url = base_url
+        self.client_id = client_id
+        self.client_secret = client_secret
+        self.ca_path = ca_path
+
+    def _login_uri(self, path):
+        return "{}{}".format(self.base_url, path)
+
+    def generate_login_url(self, scopes=None, redirect_uri=None):
+        """
+        Generates a url to send users so that they may authenticate to this
+        application. This url is suitable for redirecting a user to. For
+        example, in `Flask`_, a login route might be implemented like this::
+
+            @app.route("/login")
+            def begin_oauth_login():
+                login_client = LinodeLoginClient(client_id, client_secret)
+                return redirect(login_client.generate_login_url())
+
+        .. _Flask: http://flask.pocoo.org
+
+        :param scopes: The OAuth scopes to request for this login.
+        :type scopes: list
+        :param redirect_uri: The requested redirect uri. The login service
+                             enforces that this is under the registered redirect
+                             path.
+        :type redirect_uri: str
+
+        :returns: The uri to send users to for this login attempt.
+        :rtype: str
+        """
+        url = self.base_url + "/oauth/authorize"
+        split = list(urlparse(url))
+        params = {
+            "client_id": self.client_id,
+            "response_type": "code",  # needed for all logins
+        }
+        if scopes:
+            params["scopes"] = OAuthScopes.serialize(scopes)
+        if redirect_uri:
+            params["redirect_uri"] = redirect_uri
+        split[4] = urlencode(params)
+        return urlunparse(split)
+
+    def finish_oauth(self, code):
+        """
+        Given an OAuth Exchange Code, completes the OAuth exchange with the
+        authentication server. This should be called once the user has already
+        been directed to the login_uri, and has been sent back after successfully
+        authenticating.
For example, in `Flask`_, this might be implemented as + a route like this:: + + @app.route("/oauth-redirect") + def oauth_redirect(): + exchange_code = request.args.get("code") + login_client = LinodeLoginClient(client_id, client_secret) + + token, scopes, expiry, refresh_token = login_client.finish_oauth(exchange_code) + + # store the user's OAuth token in their session for later use + # and mark that they are logged in. + + return redirect("/") + + .. _Flask: http://flask.pocoo.org + + :param code: The OAuth Exchange Code returned from the authentication + server in the query string. + :type code: str + + :returns: The new OAuth token, and a list of scopes the token has, when + the token expires, and a refresh token that can generate a new + valid token when this one is expired. + :rtype: tuple(str, list, datetime, str) + + :raise ApiError: If the OAuth exchange fails. + """ + r = requests.post( + self._login_uri("/oauth/token"), + data={ + "code": code, + "client_id": self.client_id, + "client_secret": self.client_secret, + }, + verify=self.ca_path or True, + ) + + if r.status_code != 200: + raise ApiError.from_response( + r, + message="OAuth token exchange failed", + ) + + token = r.json()["access_token"] + scopes = OAuthScopes.parse(r.json()["scopes"]) + expiry = datetime.now() + timedelta(seconds=r.json()["expires_in"]) + refresh_token = r.json()["refresh_token"] + + return token, scopes, expiry, refresh_token + + def refresh_oauth_token(self, refresh_token): + """ + Some tokens are generated with refresh tokens (namely tokens generated + through an OAuth Exchange). These tokens may be renewed, or "refreshed", + with the auth server, generating a new OAuth Token with a new (later) + expiry. This method handles refreshing an OAuth Token using the refresh + token that was generated at the time of its issuance, and returns a new + OAuth token and refresh token for the same client and user. 
+ + :param refresh_token: The refresh token returned for the OAuth Token we + are renewing. + :type refresh_token: str + + :returns: The new OAuth token, and a list of scopes the token has, when + the token expires, and a refresh token that can generate a new + valid token when this one is expired. + :rtype: tuple(str, list) + + :raise ApiError: If the refresh fails.. + """ + r = requests.post( + self._login_uri("/oauth/token"), + data={ + "grant_type": "refresh_token", + "client_id": self.client_id, + "client_secret": self.client_secret, + "refresh_token": refresh_token, + }, + verify=self.ca_path or True, + ) + + if r.status_code != 200: + raise ApiError.from_response(r, message="Refresh failed") + + token = r.json()["access_token"] + scopes = OAuthScopes.parse(r.json()["scopes"]) + expiry = datetime.now() + timedelta(seconds=r.json()["expires_in"]) + refresh_token = r.json()["refresh_token"] + + return token, scopes, expiry, refresh_token + + def expire_token(self, token): + """ + Given a token, makes a request to the authentication server to expire both + access token and refresh token. + This is considered a responsible way to log out a user. + If you remove only the session your application has for the + user without expiring their token, the user is not _really_ logged out. + + :param token: The OAuth token you wish to expire + :type token: str + + :returns: If the expiration attempt succeeded. + :rtype: bool + + :raises ApiError: If the expiration attempt failed. 
+ """ + r = requests.post( + self._login_uri("/oauth/revoke"), + data={ + "token_type_hint": "access_token", + "client_id": self.client_id, + "client_secret": self.client_secret, + "token": token, + }, + verify=self.ca_path or True, + ) + + if r.status_code != 200: + raise ApiError.from_response(r, "Failed to expire token!") + return True diff --git a/linode_api4/objects/__init__.py b/linode_api4/objects/__init__.py new file mode 100644 index 000000000..89a681635 --- /dev/null +++ b/linode_api4/objects/__init__.py @@ -0,0 +1,28 @@ +# isort: skip_file +from .base import Base, Property, MappedObject, DATE_FORMAT, ExplicitNullValue +from .dbase import DerivedBase +from .serializable import JSONObject +from .filtering import and_, or_ +from .region import Region, Capability +from .image import Image +from .linode import * +from .linode_interfaces import * +from .volume import * +from .domain import * +from .account import * +from .networking import * +from .nodebalancer import * +from .support import * +from .profile import * +from .longview import * +from .tag import Tag +from .object_storage import * +from .lke import * +from .database import * +from .vpc import * +from .beta import * +from .placement import * +from .monitor import * +from .monitor_api import * +from .image_share_group import * +from .lock import * diff --git a/linode_api4/objects/account.py b/linode_api4/objects/account.py new file mode 100644 index 000000000..a4aca1848 --- /dev/null +++ b/linode_api4/objects/account.py @@ -0,0 +1,767 @@ +from __future__ import annotations + +from datetime import datetime + +import requests +from deprecated import deprecated + +from linode_api4.errors import ApiError, UnexpectedResponseError +from linode_api4.objects import DATE_FORMAT, Volume +from linode_api4.objects.base import Base, Property +from linode_api4.objects.database import Database +from linode_api4.objects.dbase import DerivedBase +from linode_api4.objects.domain import Domain +from 
linode_api4.objects.image import Image +from linode_api4.objects.linode import Instance, StackScript +from linode_api4.objects.longview import LongviewClient, LongviewSubscription +from linode_api4.objects.networking import Firewall +from linode_api4.objects.nodebalancer import NodeBalancer +from linode_api4.objects.profile import PersonalAccessToken +from linode_api4.objects.serializable import StrEnum +from linode_api4.objects.support import SupportTicket +from linode_api4.objects.volume import Volume +from linode_api4.objects.vpc import VPC + + +class Account(Base): + """ + The contact and billing information related to your Account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-account + """ + + api_endpoint = "/account" + id_attribute = "email" + + properties = { + "company": Property(mutable=True), + "country": Property(mutable=True), + "balance": Property(), + "address_1": Property(mutable=True), + "last_name": Property(mutable=True), + "city": Property(mutable=True), + "state": Property(mutable=True), + "first_name": Property(mutable=True), + "phone": Property(mutable=True), + "email": Property(mutable=True), + "zip": Property(mutable=True), + "address_2": Property(mutable=True), + "tax_id": Property(mutable=True), + "capabilities": Property(unordered=True), + "credit_card": Property(), + "active_promotions": Property(), + "active_since": Property(), + "balance_uninvoiced": Property(), + "billing_source": Property(), + "euuid": Property(), + } + + +class ChildAccount(Account): + """ + A child account under a parent account. + + NOTE: Parent/Child related features may not be generally available. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-child-account + """ + + api_endpoint = "/account/child-accounts/{euuid}" + id_attribute = "euuid" + + def create_token(self, **kwargs): + """ + Create an ephemeral token for accessing the child account. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-child-account-token + """ + resp = self._client.post( + "{}/token".format(self.api_endpoint), + model=self, + data=kwargs, + ) + + if "errors" in resp: + raise UnexpectedResponseError( + "Unexpected response when creating a token for the child account!", + json=resp, + ) + + return PersonalAccessToken(self._client, resp["id"], resp) + + +class ServiceTransfer(Base): + """ + A transfer request for transferring a service between Linode accounts. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-service-transfer + """ + + api_endpoint = "/account/service-transfers/{token}" + id_attribute = "token" + properties = { + "token": Property(identifier=True), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "is_sender": Property(), + "expiry": Property(), + "status": Property(), + "entities": Property(), + } + + def service_transfer_accept(self): + """ + Accept a Service Transfer for the provided token to receive the services included in the transfer to your account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-accept-service-transfer + """ + + resp = self._client.post( + "{}/accept".format(self.api_endpoint), + model=self, + ) + + if "errors" in resp: + raise UnexpectedResponseError( + "Unexpected response when accepting service transfer!", + json=resp, + ) + + +class PaymentMethod(Base): + """ + A payment method to be used on this Linode account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-payment-method + """ + + api_endpoint = "/account/payment-methods/{id}" + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "is_default": Property(), + "type": Property(), + "data": Property(), + } + + def payment_method_make_default(self): + """ + Make this Payment Method the default method for automatically processing payments. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-make-payment-method-default + """ + + resp = self._client.post( + "{}/make-default".format(self.api_endpoint), + model=self, + ) + + if "errors" in resp: + raise UnexpectedResponseError( + "Unexpected response when making payment method default!", + json=resp, + ) + + +class Login(Base): + """ + A login entry for this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-account-login + """ + + api_endpoint = "/account/logins/{id}" + properties = { + "id": Property(identifier=True), + "datetime": Property(is_datetime=True), + "ip": Property(), + "restricted": Property(), + "status": Property(), + "username": Property(), + } + + +class AccountSettingsInterfacesForNewLinodes(StrEnum): + """ + A string enum corresponding to valid values + for the AccountSettings(...).interfaces_for_new_linodes field. + + NOTE: This feature may not currently be available to all users. + """ + + legacy_config_only = "legacy_config_only" + legacy_config_default_but_linode_allowed = ( + "legacy_config_default_but_linode_allowed" + ) + linode_default_but_legacy_config_allowed = ( + "linode_default_but_legacy_config_allowed" + ) + linode_only = "linode_only" + + +class AccountSettings(Base): + """ + Information related to your Account settings. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-account-settings + """ + + api_endpoint = "/account/settings" + id_attribute = "managed" # this isn't actually used + + properties = { + "network_helper": Property(mutable=True), + "managed": Property(), + "longview_subscription": Property( + slug_relationship=LongviewSubscription, mutable=False + ), + "object_storage": Property(), + "backups_enabled": Property(mutable=True), + "interfaces_for_new_linodes": Property(mutable=True), + "maintenance_policy": Property(mutable=True), + } + + +class Event(Base): + """ + An event object representing an event that took place on this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-event + """ + + api_endpoint = "/account/events/{id}" + properties = { + "id": Property(identifier=True), + "percent_complete": Property(volatile=True), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "seen": Property(), + "read": Property(), + "action": Property(), + "user_id": Property(), + "username": Property(), + "entity": Property(), + "time_remaining": Property(), # Deprecated + "rate": Property(), + "status": Property(), + "duration": Property(), + "secondary_entity": Property(), + "message": Property(), + "maintenance_policy_set": Property(), + "description": Property(), + "source": Property(), + "not_before": Property(is_datetime=True), + "start_time": Property(is_datetime=True), + "complete_time": Property(is_datetime=True), + } + + @property + def linode(self): + """ + Returns the Linode Instance referenced by this event. + + :returns: The Linode Instance referenced by this event. + :rtype: Optional[Instance] + """ + + if self.entity and self.entity.type == "linode": + return Instance(self._client, self.entity.id) + return None + + @property + def stackscript(self): + """ + Returns the Linode StackScript referenced by this event. 
+ + :returns: The Linode StackScript referenced by this event. + :rtype: Optional[StackScript] + """ + + if self.entity and self.entity.type == "stackscript": + return StackScript(self._client, self.entity.id) + return None + + @property + def domain(self): + """ + Returns the Linode Domain referenced by this event. + + :returns: The Linode Domain referenced by this event. + :rtype: Optional[Domain] + """ + + if self.entity and self.entity.type == "domain": + return Domain(self._client, self.entity.id) + return None + + @property + def nodebalancer(self): + """ + Returns the Linode NodeBalancer referenced by this event. + + :returns: The Linode NodeBalancer referenced by this event. + :rtype: Optional[NodeBalancer] + """ + + if self.entity and self.entity.type == "nodebalancer": + return NodeBalancer(self._client, self.entity.id) + return None + + @property + def ticket(self): + """ + Returns the Linode Support Ticket referenced by this event. + + :returns: The Linode Support Ticket referenced by this event. + :rtype: Optional[SupportTicket] + """ + + if self.entity and self.entity.type == "ticket": + return SupportTicket(self._client, self.entity.id) + return None + + @property + def volume(self): + """ + Returns the Linode Volume referenced by this event. + + :returns: The Linode Volume referenced by this event. + :rtype: Optional[Volume] + """ + + if self.entity and self.entity.type == "volume": + return Volume(self._client, self.entity.id) + return None + + @deprecated( + reason="`mark_read` API is deprecated. Use the 'mark_seen' " + "API instead. Please note that the `mark_seen` API functions " + "differently and will mark all events up to and including the " + "referenced event-id as 'seen' rather than individual events.", + ) + def mark_read(self): + """ + Marks a single Event as read. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-event-read + """ + + self._client.post("{}/read".format(Event.api_endpoint), model=self) + + def mark_seen(self): + """ + Marks a single Event as seen. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-event-seen + """ + + self._client.post("{}/seen".format(Event.api_endpoint), model=self) + + +class InvoiceItem(DerivedBase): + """ + An individual invoice item under an :any:`Invoice` object. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-invoice-items + """ + + api_endpoint = "/account/invoices/{invoice_id}/items" + derived_url_path = "items" + parent_id_name = "invoice_id" + id_attribute = "label" # this has to be something + + properties = { + "invoice_id": Property(identifier=True), + "unit_price": Property(), + "label": Property(), + "amount": Property(), + "quantity": Property(), + #'from_date': Property(is_datetime=True), this is populated below from the "from" attribute + "to": Property(is_datetime=True), + #'to_date': Property(is_datetime=True), this is populated below from the "to" attribute + "type": Property(), + } + + def _populate(self, json): + """ + Allows population of "from_date" from the returned "from" attribute which + is a reserved word in python. Also populates "to_date" to be complete. + """ + super()._populate(json) + + self.from_date = datetime.strptime(json["from"], DATE_FORMAT) + self.to_date = datetime.strptime(json["to"], DATE_FORMAT) + + +class Invoice(Base): + """ + A single invoice on this Linode account. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-invoice + """ + + api_endpoint = "/account/invoices/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(), + "date": Property(is_datetime=True), + "total": Property(), + "items": Property(derived_class=InvoiceItem), + "tax": Property(), + "tax_summary": Property(), + "subtotal": Property(), + } + + +class OAuthClient(Base): + """ + An OAuthClient object that can be used to authenticate apps with this account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-client + """ + + api_endpoint = "/account/oauth-clients/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "secret": Property(), + "redirect_uri": Property(mutable=True), + "status": Property(), + "public": Property(mutable=True), + "thumbnail_url": Property(), + } + + def reset_secret(self): + """ + Resets the client secret for this client. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-reset-client-secret + """ + result = self._client.post( + "{}/reset_secret".format(OAuthClient.api_endpoint), model=self + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when resetting secret!", json=result + ) + + self._populate(result) + return self.secret + + def thumbnail(self, dump_to=None): + """ + This returns binary data that represents a 128x128 image. + If dump_to is given, attempts to write the image to a file + at the given location. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-client-thumbnail + """ + headers = {"Authorization": "token {}".format(self._client.token)} + + result = requests.get( + "{}/{}/thumbnail".format( + self._client.base_url, + OAuthClient.api_endpoint.format(id=self.id), + ), + headers=headers, + ) + + if not result.status_code == 200: + raise ApiError.from_response( + result, + "No thumbnail found for OAuthClient {}".format(self.id), + disable_formatting=True, + ) + + if dump_to: + with open(dump_to, "wb+") as f: + f.write(result.content) + return result.content + + def set_thumbnail(self, thumbnail): + """ + Sets the thumbnail for this OAuth Client. If thumbnail is bytes, + uploads it as a png. Otherwise, assumes thumbnail is a path to the + thumbnail and reads it in as bytes before uploading. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/put-client-thumbnail + """ + headers = { + "Authorization": "token {}".format(self._client.token), + "Content-type": "image/png", + } + + # TODO this check needs to be smarter - python2 doesn't do it right + if not isinstance(thumbnail, bytes): + with open(thumbnail, "rb") as f: + thumbnail = f.read() + + result = requests.put( + "{}/{}/thumbnail".format( + self._client.base_url, + OAuthClient.api_endpoint.format(id=self.id), + ), + headers=headers, + data=thumbnail, + ) + + api_exc = ApiError.from_response(result) + if api_exc is not None: + raise api_exc + + return True + + +class Payment(Base): + """ + An object representing a single payment on the current Linode Account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-payment + """ + + api_endpoint = "/account/payments/{id}" + + properties = { + "id": Property(identifier=True), + "date": Property(is_datetime=True), + "usd": Property(), + } + + +class User(Base): + """ + An object representing a single user on this account. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-user + """ + + api_endpoint = "/account/users/{id}" + id_attribute = "username" + + properties = { + "email": Property(), + "username": Property(identifier=True, mutable=True), + "user_type": Property(), + "restricted": Property(mutable=True), + "ssh_keys": Property(), + "tfa_enabled": Property(), + } + + @property + def grants(self): + """ + Retrieves the grants for this user. If the user is unrestricted, this + will result in an ApiError. This is smart, and will only fetch from the + api once unless the object is invalidated. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-user-grants + + :returns: The grants for this user. + :rtype: linode.objects.account.UserGrants + """ + from linode_api4.objects.account import ( # pylint: disable-all + UserGrants, + ) + + if not hasattr(self, "_grants"): + resp = self._client.get( + UserGrants.api_endpoint.format(username=self.username) + ) + + grants = UserGrants(self._client, self.username, resp) + self._set("_grants", grants) + + return self._grants + + def invalidate(self): + if hasattr(self, "_grants"): + del self._grants + Base.invalidate(self) + + +def get_obj_grants(): + """ + Returns Grant keys mapped to Object types. + """ + + return ( + ("linode", Instance), + ("domain", Domain), + ("stackscript", StackScript), + ("nodebalancer", NodeBalancer), + ("volume", Volume), + ("image", Image), + ("longview", LongviewClient), + ("database", Database), + ("firewall", Firewall), + ("vpc", VPC), + ) + + +class Grant: + """ + A Grant is a single grant a user has to an object. A Grant's entity is + an object on the account, such as a Linode, NodeBalancer, or Volume, and + its permissions level is one of None, "read_only" or "read_write". + + Grants cannot be accessed or updated individually, and are only relevant in + the context of a UserGrants object. 
+ """ + + def __init__(self, client, cls, dct): + self._client = client + self.cls = cls + self.id = dct["id"] + self.label = dct["label"] + self.permissions = dct["permissions"] + + @property + def entity(self): + """ + Returns the object this grant is for. The objects type depends on the + type of object this grant is applied to, and the object returned is + not populated (accessing its attributes will trigger an api request). + + :returns: This grant's entity + :rtype: Linode, NodeBalancer, Domain, StackScript, Volume, or Longview + """ + # there are no grants for derived types, so this shouldn't happen + if not issubclass(self.cls, Base) or issubclass(self.cls, DerivedBase): + raise ValueError( + "Cannot get entity for non-base-class {}".format(self.cls) + ) + return self.cls(self._client, self.id) + + def _serialize(self, *args, **kwargs): + """ + Returns this grant in as JSON the api will accept. This is only relevant + in the context of UserGrants.save + """ + return {"permissions": self.permissions, "id": self.id} + + +class UserGrants: + """ + The UserGrants object represents the grants given to a restricted user. + Each section of grants has a list of objects and the level of grants this + user has to that object. + + This is not an instance of Base because it lacks most of the attributes of + a Base-like model (such as a unique, ID-based endpoint at which to access + it), however it has some similarities so that its usage is familiar. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-user-grants + """ + + api_endpoint = "/account/users/{username}/grants" + parent_id_name = "username" + + def __init__(self, client, username, json=None): + self._client = client + self.username = username + + if json is not None: + self._populate(json) + + def _populate(self, json): + self.global_grants = type("global_grants", (object,), json["global"]) + + for key, cls in get_obj_grants(): + if key in json: + lst = [] + for gdct in json[key]: + lst.append(Grant(self._client, cls, gdct)) + setattr(self, key, lst) + + @property + def _global_grants_dict(self): + """ + The global grants stored in this object. + """ + return { + k: v + for k, v in vars(self.global_grants).items() + if not k.startswith("_") + } + + @property + def _grants_dict(self): + """ + The grants stored in this object. + """ + grants = {} + for key, _ in get_obj_grants(): + if hasattr(self, key): + lst = [] + for cg in getattr(self, key): + lst.append(cg._serialize()) + grants[key] = lst + + return grants + + def _serialize(self, *args, **kwargs): + """ + Returns the user grants in as JSON the api will accept. + This is only relevant in the context of UserGrants.save + """ + return { + "global": self._global_grants_dict, + **self._grants_dict, + } + + def save(self): + """ + Applies the grants to the parent user. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/put-user-grants + """ + + req = self._serialize() + + result = self._client.put( + UserGrants.api_endpoint.format(username=self.username), data=req + ) + + self._populate(result) + + return True + + +class AccountBetaProgram(Base): + """ + The details and enrollment information of a Beta program that an account is enrolled in. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-enrolled-beta-program + """ + + api_endpoint = "/account/betas/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(), + "description": Property(), + "started": Property(is_datetime=True), + "ended": Property(is_datetime=True), + "enrolled": Property(is_datetime=True), + } + + +class AccountAvailability(Base): + """ + Contains information about the resources available for a region under the + current account. + + API doc: https://techdocs.akamai.com/linode-api/reference/get-account-availability + """ + + api_endpoint = "/account/availability/{region}" + id_attribute = "region" + + properties = { + "region": Property(identifier=True), + "unavailable": Property(unordered=True), + "available": Property(unordered=True), + } diff --git a/linode_api4/objects/base.py b/linode_api4/objects/base.py new file mode 100644 index 000000000..78e53fd45 --- /dev/null +++ b/linode_api4/objects/base.py @@ -0,0 +1,577 @@ +import time +from datetime import datetime, timedelta +from functools import cached_property +from typing import Any, Dict, Optional + +from linode_api4.objects.serializable import JSONObject + +from .filtering import FilterableMetaclass + +DATE_FORMAT = "%Y-%m-%dT%H:%M:%S" + + +# The interval to reload volatile properties +volatile_refresh_timeout = timedelta(seconds=15) + + +class ExplicitNullValue: + """ + An explicitly null value to set a property to. + Instances of `NullValue` differ from None as they will be explicitly + included in the resource PUT requests. 
+ """ + + +class Property: + def __init__( + self, + mutable=False, + identifier=False, + volatile=False, + relationship=None, + derived_class=None, + is_datetime=False, + id_relationship=False, + slug_relationship=False, + nullable=False, + unordered=False, + json_object=None, + alias_of: Optional[str] = None, + ): + """ + A Property is an attribute returned from the API, and defines metadata + about that value. These are expected to be used as the values of a + class-level dict named 'properties' in subclasses of Base. + + :param mutable: This Property should be sent in a call to save() + :type mutable: bool + :param identifier: This Property identifies the object in the API + :type identifier: bool + :param volatile: Re-query for this Property if the local value is older than the + volatile refresh timeout + :type volatile: bool + :param relationship: The API Object this Property represents + :type relationship: type or None + :param derived_class: The sub-collection type this Property represents + :type derived_class: type or None + :param is_datetime: True if this Property should be parsed as a datetime.datetime + :type is_datetime: bool + :param id_relationship: This Property should create a relationship with this key as the ID + (This should be used on fields ending with '_id' only) + :type id_relationship: type or None + :param slug_relationship: This property is a slug related for a given type + :type slug_relationship: type or None + :param nullable: This property can be explicitly null on PUT requests + :type nullable: bool + :param unordered: The order of this property is not significant. + NOTE: This field is currently only for annotations purposes + and does not influence any update or decoding/encoding logic. + :type unordered: bool + :param json_object: The JSONObject class this property should be decoded into + :type json_object: type or None + :param alias_of: The original API attribute name when the property key is aliased. 
+ This is useful when the API attribute name is a Python reserved word, + allowing you to use a different key while preserving the original name. + :type alias_of: str or None + """ + self.mutable = mutable + self.identifier = identifier + self.volatile = volatile + self.relationship = relationship + self.derived_class = derived_class + self.is_datetime = is_datetime + self.id_relationship = id_relationship + self.slug_relationship = slug_relationship + self.nullable = nullable + self.unordered = unordered + self.json_class = json_object + self.alias_of = alias_of + + +class MappedObject: + """ + Converts a dict into values accessible with the dot notation. + + object = { + "this": "that" + } + + becomes + + object.this # "that" + """ + + def __init__(self, **vals): + self._expand_vals(self.__dict__, **vals) + + def _expand_vals(self, target, **vals): + for v in vals: + if type(vals[v]) is dict: + vals[v] = MappedObject(**vals[v]) + elif type(vals[v]) is list: + # oh mama + vals[v] = [ + MappedObject(**i) if type(i) is dict else i for i in vals[v] + ] + target.update(vals) + + def __repr__(self): + return "Mapping containing {}".format(vars(self).keys()) + + @staticmethod + def _flatten_base_subclass(obj: "Base") -> Optional[Dict[str, Any]]: + if obj is None: + return None + + # If the object hasn't already been lazy-loaded, + # manually refresh it + if not getattr(obj, "_populated", False): + obj._api_get() + + return obj._raw_json + + @property + def dict(self): + return self._serialize() + + def _serialize(self, is_put: bool = False) -> Dict[str, Any]: + result = vars(self).copy() + cls = type(self) + + for k, v in result.items(): + if isinstance(v, cls): + result[k] = v.dict + elif isinstance(v, list): + result[k] = [ + ( + item._serialize(is_put=is_put) + if isinstance(item, (cls, JSONObject)) + else ( + self._flatten_base_subclass(item) + if isinstance(item, Base) + else item + ) + ) + for item in v + ] + elif isinstance(v, Base): + result[k] = 
self._flatten_base_subclass(v) + elif isinstance(v, JSONObject): + result[k] = v._serialize(is_put=is_put) + + return result + + +class Base(object, metaclass=FilterableMetaclass): + """ + The Base class knows how to look up api properties of a model, and lazy-load them. + """ + + properties = {} + + def __init__(self, client: object, id: object, json: object = {}) -> object: + self._set("_populated", False) + self._set("_last_updated", datetime.min) + self._set("_client", client) + self._set("_changed", False) + + #: self._raw_json is a copy of the json received from the API on population, + #: and cannot be relied upon to be current. Local changes to mutable fields + #: that have not been saved will not be present, and volatile fields will not + #: be updated on access. + self._set("_raw_json", None) + + for k, v in type(self).properties.items(): + if v.identifier: + continue + + self._set(k, None) + + self._set("id", id) + if hasattr(type(self), "id_attribute"): + self._set(getattr(type(self), "id_attribute"), id) + + self._populate(json) + + def __getattribute__(self, name): + """ + Handles lazy-loading/refreshing an object from the server, and + getting related objects, as defined in this object's 'properties' + """ + if name in type(self).properties.keys(): + # We are accessing a Property + if type(self).properties[name].identifier: + pass # don't load identifiers from the server, we have those + elif ( + object.__getattribute__(self, name) is None + and not self._populated + or type(self).properties[name].derived_class + ) or ( + type(self).properties[name].volatile + and object.__getattribute__(self, "_last_updated") + + volatile_refresh_timeout + < datetime.now() + ): + # needs to be loaded from the server + if type(self).properties[name].derived_class: + # load derived object(s) + self._set( + name, + type(self) + .properties[name] + .derived_class._api_get_derived( + self, getattr(self, "_client") + ), + ) + else: + self._api_get() + elif 
"{}_id".format(name) in type(self).properties.keys(): + # possible id-based relationship + related_type = ( + type(self).properties["{}_id".format(name)].id_relationship + ) + if related_type: + # no id, no related object + if not getattr(self, "{}_id".format(name)): + return None + # it is a relationship + relcache_name = "_{}_relcache".format(name) + if not hasattr(self, relcache_name): + self._set( + relcache_name, + related_type( + self._client, getattr(self, "{}_id".format(name)) + ), + ) + return object.__getattribute__(self, relcache_name) + + return object.__getattribute__(self, name) + + def __repr__(self): + """ + Returns a safe representation of this object without accessing the server + """ + return "{}: {}".format(type(self).__name__, self.id) + + def __setattr__(self, name, value): + """ + Enforces allowing editing of only Properties defined as mutable + """ + + if name in type(self).properties.keys(): + if not type(self).properties[name].mutable: + raise AttributeError( + "'{}' is not a mutable field of '{}'".format( + name, type(self).__name__ + ) + ) + + self._changed = True + + self._set(name, value) + + @cached_property + def properties_with_alias(self) -> dict[str, tuple[str, Property]]: + """ + Gets a dictionary of aliased properties for this object. + + :returns: A dict mapping original API attribute names to their alias names and + corresponding Property instances. + :rtype: dict[str, tuple[str, Property]] + """ + return { + prop.alias_of: (alias, prop) + for alias, prop in type(self).properties.items() + if prop.alias_of + } + + def save(self, force=True) -> bool: + """ + Send this object's mutable values to the server in a PUT request. + + :param force: If true, this method will always send a PUT request regardless of + whether the field has been explicitly updated. For optimization + purposes, this field should be set to false for typical update + operations. 
(Defaults to True) + :type force: bool + """ + if not force and not self._changed: + return False + + data = None + if not self._populated: + data = { + a: object.__getattribute__(self, a) + for a in type(self).properties + if type(self).properties[a].mutable + and object.__getattribute__(self, a) is not None + } + + for key, value in data.items(): + if ( + isinstance(value, ExplicitNullValue) + or value == ExplicitNullValue + ): + data[key] = None + + # Ensure we serialize any values that may not be already serialized + data = _flatten_request_body_recursive(data, is_put=True) + else: + data = self._serialize(is_put=True) + + resp = self._client.put(type(self).api_endpoint, model=self, data=data) + + if "error" in resp: + return False + + self._set("_changed", False) + + return True + + def delete(self): + """ + Sends a DELETE request for this object + """ + resp = self._client.delete(type(self).api_endpoint, model=self) + + if "error" in resp: + return False + self.invalidate() + return True + + def invalidate(self): + """ + Invalidates all non-identifier Properties this object has locally, + causing the next access to re-fetch them from the server + """ + for key in [ + k + for k in type(self).properties.keys() + if not type(self).properties[k].identifier + ]: + self._set(key, None) + + self._set("_populated", False) + + def _serialize(self, is_put: bool = False): + """ + A helper method to build a dict of all mutable Properties of + this object + """ + + result = {} + + # Aggregate mutable values into a dict + for k, v in type(self).properties.items(): + if not v.mutable: + continue + + value = getattr(self, k) + + if not v.nullable and (value is None or value == ""): + continue + + # Let's allow explicit null values as both classes and instances + if ( + isinstance(value, ExplicitNullValue) + or value == ExplicitNullValue + ): + value = None + + api_key = k if not v.alias_of else v.alias_of + result[api_key] = value + + # Resolve the underlying IDs of results 
+ for k, v in result.items(): + result[k] = _flatten_request_body_recursive(v, is_put=is_put) + + return result + + def _api_get(self): + """ + A helper method to GET this object from the server + """ + json = self._client.get(type(self).api_endpoint, model=self) + self._populate(json) + + def _populate(self, json): + """ + A helper method that, given a JSON object representing this object, + assigns values based on the properties dict and the attributes of + its Properties. + """ + if not json: + return + + # hide the raw JSON away in case someone needs it + self._set("_raw_json", json) + self._set("_updated", False) + + valid_keys = set( + k + for k, v in type(self).properties.items() + if (not v.identifier) and (not v.alias_of) + ) | set(self.properties_with_alias.keys()) + + for api_key in json: + if api_key in valid_keys: + prop = type(self).properties.get(api_key) + prop_key = api_key + + if prop is None: + prop_key, prop = self.properties_with_alias[api_key] + + if prop.relationship and json[api_key] is not None: + if isinstance(json[api_key], list): + objs = [] + for d in json[api_key]: + if not "id" in d: + continue + new_class = prop.relationship + obj = new_class.make_instance( + d["id"], getattr(self, "_client") + ) + if obj: + obj._populate(d) + objs.append(obj) + self._set(prop_key, objs) + else: + if isinstance(json[api_key], dict): + related_id = json[api_key]["id"] + else: + related_id = json[api_key] + new_class = prop.relationship + obj = new_class.make_instance( + related_id, getattr(self, "_client") + ) + if obj and isinstance(json[api_key], dict): + obj._populate(json[api_key]) + self._set(prop_key, obj) + elif prop.slug_relationship and json[api_key] is not None: + # create an object of the expected type with the given slug + self._set( + prop_key, + prop.slug_relationship(self._client, json[api_key]), + ) + elif prop.json_class: + json_class = prop.json_class + json_value = json[api_key] + + # build JSON object + if isinstance(json_value, 
list): + # We need special handling for list responses + value = [json_class.from_json(v) for v in json_value] + else: + value = json_class.from_json(json_value) + + self._set(prop_key, value) + elif type(json[api_key]) is dict: + self._set(prop_key, MappedObject(**json[api_key])) + elif type(json[api_key]) is list: + # we're going to use MappedObject's behavior with lists to + # expand these, then grab the resulting value to set + mapping = MappedObject(_list=json[api_key]) + self._set( + prop_key, mapping._list + ) # pylint: disable=no-member + elif prop.is_datetime: + try: + t = time.strptime(json[api_key], DATE_FORMAT) + self._set( + prop_key, datetime.fromtimestamp(time.mktime(t)) + ) + except: + # if this came back, there's probably an issue with the + # python library; a field was marked as a datetime but + # wasn't in the expected format. + self._set(prop_key, json[api_key]) + else: + self._set(prop_key, json[api_key]) + + self._set("_populated", True) + self._set("_last_updated", datetime.now()) + + def _set(self, name, value): + """ + A helper method to set values of Properties without invoking + the overloaded __setattr__ + """ + object.__setattr__(self, name, value) + + @classmethod + def api_list(cls): + """ + Returns a URL that will produce a list of JSON objects + of this class' type + """ + return "/".join(cls.api_endpoint.split("/")[:-1]) + + @staticmethod + def make(id, client, cls, parent_id=None, json=None): + """ + Makes an api object based on an id and class. 
+
+        :param id: The id of the object to create
+        :param client: The LinodeClient to give the new object
+        :param cls: The class type to instantiate
+        :param parent_id: The parent id for derived classes
+        :param json: The JSON to use to populate the new class
+
+        :returns: An instance of cls with the given id
+        """
+        from .dbase import DerivedBase  # pylint: disable-all
+
+        if issubclass(cls, DerivedBase):
+            return cls(client, id, parent_id, json)
+        else:
+            return cls(client, id, json)
+
+    @classmethod
+    def make_instance(cls, id, client, parent_id=None, json=None):
+        """
+        Makes an instance of the class this is called on and returns it.
+
+        The intended usage is:
+          instance = Linode.make_instance(123, client, json=response)
+
+        :param cls: The class this was called on.
+        :param id: The id of the instance to create
+        :param client: The client to use for this instance
+        :param parent_id: The parent id for derived classes
+        :param json: The JSON to populate the instance with
+
+        :returns: A new instance of this type, populated with json
+        """
+        return Base.make(id, client, cls, parent_id=parent_id, json=json)
+
+
+def _flatten_request_body_recursive(data: Any, is_put: bool = False) -> Any:
+    """
+    This is a helper to recursively flatten the given data for use in an API request body.
+
+    NOTE: This helper does NOT raise an error if an attribute is
+    not known to be JSON serializable.
+
+    :param data: Arbitrary data to flatten.
+    :return: The serialized data.
+ """ + + if isinstance(data, dict): + return { + k: _flatten_request_body_recursive(v, is_put=is_put) + for k, v in data.items() + } + + if isinstance(data, list): + return [_flatten_request_body_recursive(v, is_put=is_put) for v in data] + + if isinstance(data, Base): + return data.id + + if isinstance(data, ExplicitNullValue) or data == ExplicitNullValue: + return None + + if isinstance(data, MappedObject) or issubclass(type(data), JSONObject): + return data._serialize(is_put=is_put) + + return data diff --git a/linode_api4/objects/beta.py b/linode_api4/objects/beta.py new file mode 100644 index 000000000..45d5c5102 --- /dev/null +++ b/linode_api4/objects/beta.py @@ -0,0 +1,23 @@ +from linode_api4.objects import Base, Property + + +class BetaProgram(Base): + """ + Beta program is a new product or service that's not generally available to all customers. + User with permissions can enroll into a beta program and access the functionalities. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-beta-program + """ + + api_endpoint = "/betas/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(), + "description": Property(), + "started": Property(is_datetime=True), + "ended": Property(is_datetime=True), + "greenlight_only": Property(), + "more_info": Property(), + "beta_class": Property(alias_of="class"), + } diff --git a/linode_api4/objects/database.py b/linode_api4/objects/database.py new file mode 100644 index 000000000..b3c6f8c35 --- /dev/null +++ b/linode_api4/objects/database.py @@ -0,0 +1,576 @@ +from dataclasses import dataclass, field +from typing import Optional + +from linode_api4.objects import ( + Base, + JSONObject, + MappedObject, + Property, +) + + +class DatabaseType(Base): + """ + The type of a managed database. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-type + """ + + api_endpoint = "/databases/types/{id}" + + properties = { + "deprecated": Property(), + "disk": Property(), + "engines": Property(), + "id": Property(identifier=True), + "label": Property(), + "memory": Property(), + "vcpus": Property(), + # type_class is populated from the 'class' attribute of the returned JSON + } + + def _populate(self, json): + """ + Allows changing the name "class" in JSON to "type_class" in python + """ + super()._populate(json) + + if "class" in json: + setattr(self, "type_class", json["class"]) + else: + setattr(self, "type_class", None) + + +class DatabaseEngine(Base): + """ + A managed database engine. The following database engines are available on Linodeโ€™s platform: + + - MySQL + - PostgreSQL + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-engine + """ + + api_endpoint = "/databases/engines/{id}" + + properties = { + "id": Property(identifier=True), + "engine": Property(), + "version": Property(), + } + + def invalidate(self): + """ + Clear out cached properties. + """ + + for attr in ["_instance"]: + if hasattr(self, attr): + delattr(self, attr) + + Base.invalidate(self) + + +@dataclass +class DatabasePrivateNetwork(JSONObject): + """ + DatabasePrivateNetwork is used to specify + a Database Cluster's private network settings during its creation. 
+ """ + + vpc_id: Optional[int] = None + subnet_id: Optional[int] = None + public_access: Optional[bool] = None + + +@dataclass +class MySQLDatabaseConfigMySQLOptions(JSONObject): + """ + MySQLDatabaseConfigMySQLOptions represents the fields in the mysql + field of the MySQLDatabaseConfigOptions class + """ + + connect_timeout: Optional[int] = None + default_time_zone: Optional[str] = None + group_concat_max_len: Optional[float] = None + information_schema_stats_expiry: Optional[int] = None + innodb_change_buffer_max_size: Optional[int] = None + innodb_flush_neighbors: Optional[int] = None + innodb_ft_min_token_size: Optional[int] = None + innodb_ft_server_stopword_table: Optional[str] = None + innodb_lock_wait_timeout: Optional[int] = None + innodb_log_buffer_size: Optional[int] = None + innodb_online_alter_log_max_size: Optional[int] = None + innodb_read_io_threads: Optional[int] = None + innodb_rollback_on_timeout: Optional[bool] = None + innodb_thread_concurrency: Optional[int] = None + innodb_write_io_threads: Optional[int] = None + interactive_timeout: Optional[int] = None + internal_tmp_mem_storage_engine: Optional[str] = None + max_allowed_packet: Optional[int] = None + max_heap_table_size: Optional[int] = None + net_buffer_length: Optional[int] = None + net_read_timeout: Optional[int] = None + net_write_timeout: Optional[int] = None + sort_buffer_size: Optional[int] = None + sql_mode: Optional[str] = None + sql_require_primary_key: Optional[bool] = None + tmp_table_size: Optional[int] = None + wait_timeout: Optional[int] = None + + +@dataclass +class MySQLDatabaseConfigOptions(JSONObject): + """ + MySQLDatabaseConfigOptions is used to specify + a MySQL Database Cluster's configuration options during its creation. 
+ """ + + mysql: Optional[MySQLDatabaseConfigMySQLOptions] = None + binlog_retention_period: Optional[int] = None + + +@dataclass +class PostgreSQLDatabaseConfigPGLookoutOptions(JSONObject): + """ + PostgreSQLDatabasePGLookoutConfigOptions represents the fields in the pglookout + field of the PostgreSQLDatabasePGConfigOptions class + """ + + max_failover_replication_time_lag: Optional[int] = None + + +@dataclass +class PostgreSQLDatabaseConfigPGOptions(JSONObject): + """ + PostgreSQLDatabasePGConfigOptions represents the fields in the pg + field of the PostgreSQLDatabasePGConfigOptions class + """ + + autovacuum_analyze_scale_factor: Optional[float] = None + autovacuum_analyze_threshold: Optional[int] = None + autovacuum_max_workers: Optional[int] = None + autovacuum_naptime: Optional[int] = None + autovacuum_vacuum_cost_delay: Optional[int] = None + autovacuum_vacuum_cost_limit: Optional[int] = None + autovacuum_vacuum_scale_factor: Optional[float] = None + autovacuum_vacuum_threshold: Optional[int] = None + bgwriter_delay: Optional[int] = None + bgwriter_flush_after: Optional[int] = None + bgwriter_lru_maxpages: Optional[int] = None + bgwriter_lru_multiplier: Optional[float] = None + deadlock_timeout: Optional[int] = None + default_toast_compression: Optional[str] = None + idle_in_transaction_session_timeout: Optional[int] = None + jit: Optional[bool] = None + max_files_per_process: Optional[int] = None + max_locks_per_transaction: Optional[int] = None + max_logical_replication_workers: Optional[int] = None + max_parallel_workers: Optional[int] = None + max_parallel_workers_per_gather: Optional[int] = None + max_pred_locks_per_transaction: Optional[int] = None + max_replication_slots: Optional[int] = None + max_slot_wal_keep_size: Optional[int] = None + max_stack_depth: Optional[int] = None + max_standby_archive_delay: Optional[int] = None + max_standby_streaming_delay: Optional[int] = None + max_wal_senders: Optional[int] = None + max_worker_processes: 
Optional[int] = None + password_encryption: Optional[str] = None + pg_partman_bgw_interval: Optional[int] = field( + default=None, metadata={"json_key": "pg_partman_bgw.interval"} + ) + pg_partman_bgw_role: Optional[str] = field( + default=None, metadata={"json_key": "pg_partman_bgw.role"} + ) + pg_stat_monitor_pgsm_enable_query_plan: Optional[bool] = field( + default=None, + metadata={"json_key": "pg_stat_monitor.pgsm_enable_query_plan"}, + ) + pg_stat_monitor_pgsm_max_buckets: Optional[int] = field( + default=None, metadata={"json_key": "pg_stat_monitor.pgsm_max_buckets"} + ) + pg_stat_statements_track: Optional[str] = field( + default=None, metadata={"json_key": "pg_stat_statements.track"} + ) + temp_file_limit: Optional[int] = None + timezone: Optional[str] = None + track_activity_query_size: Optional[int] = None + track_commit_timestamp: Optional[str] = None + track_functions: Optional[str] = None + track_io_timing: Optional[str] = None + wal_sender_timeout: Optional[int] = None + wal_writer_delay: Optional[int] = None + + +@dataclass +class PostgreSQLDatabaseConfigOptions(JSONObject): + """ + PostgreSQLDatabaseConfigOptions is used to specify + a PostgreSQL Database Cluster's configuration options during its creation. + """ + + pg: Optional[PostgreSQLDatabaseConfigPGOptions] = None + pg_stat_monitor_enable: Optional[bool] = None + pglookout: Optional[PostgreSQLDatabaseConfigPGLookoutOptions] = None + shared_buffers_percentage: Optional[float] = None + work_mem: Optional[int] = None + + +class MySQLDatabase(Base): + """ + An accessible Managed MySQL Database. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-mysql-instance + """ + + api_endpoint = "/databases/mysql/instances/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "allow_list": Property(mutable=True, unordered=True), + "cluster_size": Property(mutable=True), + "created": Property(is_datetime=True), + "encrypted": Property(), + "engine": Property(), + "hosts": Property(), + "port": Property(), + "region": Property(), + "ssl_connection": Property(), + "status": Property(volatile=True), + "type": Property(mutable=True), + "fork": Property(), + "oldest_restore_time": Property(is_datetime=True), + "updated": Property(volatile=True, is_datetime=True), + "updates": Property(mutable=True), + "version": Property(), + "engine_config": Property( + mutable=True, json_object=MySQLDatabaseConfigOptions + ), + "private_network": Property( + mutable=True, json_object=DatabasePrivateNetwork, nullable=True + ), + } + + @property + def credentials(self): + """ + Display the root username and password for an accessible Managed MySQL Database. + The Database must have an active status to perform this command. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-mysql-instance-credentials + + :returns: MappedObject containing credntials for this DB + :rtype: MappedObject + """ + + if not hasattr(self, "_credentials"): + resp = self._client.get( + "{}/credentials".format(MySQLDatabase.api_endpoint), model=self + ) + self._set("_credentials", MappedObject(**resp)) + + return self._credentials + + @property + def ssl(self): + """ + Display the SSL CA certificate for an accessible Managed MySQL Database. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-mysql-instance-ssl + + :returns: MappedObject containing SSL CA certificate for this DB + :rtype: MappedObject + """ + + if not hasattr(self, "_ssl"): + resp = self._client.get( + "{}/ssl".format(MySQLDatabase.api_endpoint), model=self + ) + self._set("_ssl", MappedObject(**resp)) + + return self._ssl + + def credentials_reset(self): + """ + Reset the root password for a Managed MySQL Database. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-mysql-instance-credentials-reset + + :returns: Response from the API call to reset credentials + :rtype: dict + """ + + self.invalidate() + + return self._client.post( + "{}/credentials/reset".format(MySQLDatabase.api_endpoint), + model=self, + ) + + def patch(self): + """ + Apply security patches and updates to the underlying operating system of the Managed MySQL Database. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-mysql-instance-patch + + :returns: Response from the API call to apply security patches + :rtype: dict + """ + + self.invalidate() + + return self._client.post( + "{}/patch".format(MySQLDatabase.api_endpoint), model=self + ) + + def invalidate(self): + """ + Clear out cached properties. + """ + + for attr in ["_ssl", "_credentials"]: + if hasattr(self, attr): + delattr(self, attr) + + Base.invalidate(self) + + def suspend(self): + """ + Suspend a MySQL Managed Database, releasing idle resources and keeping only necessary data. + + API documentation: https://techdocs.akamai.com/linode-api/reference/suspend-databases-mysql-instance + """ + self._client.post( + "{}/suspend".format(MySQLDatabase.api_endpoint), model=self + ) + + return self.invalidate() + + def resume(self): + """ + Resume a suspended MySQL Managed Database. 
+ + API documentation: https://techdocs.akamai.com/linode-api/reference/resume-databases-mysql-instance + """ + self._client.post( + "{}/resume".format(MySQLDatabase.api_endpoint), model=self + ) + + return self.invalidate() + + +class PostgreSQLDatabase(Base): + """ + An accessible Managed PostgreSQL Database. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-postgre-sql-instance + """ + + api_endpoint = "/databases/postgresql/instances/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "allow_list": Property(mutable=True, unordered=True), + "cluster_size": Property(mutable=True), + "created": Property(is_datetime=True), + "encrypted": Property(), + "engine": Property(), + "hosts": Property(), + "port": Property(), + "region": Property(), + "ssl_connection": Property(), + "status": Property(volatile=True), + "type": Property(mutable=True), + "fork": Property(), + "oldest_restore_time": Property(is_datetime=True), + "updated": Property(volatile=True, is_datetime=True), + "updates": Property(mutable=True), + "version": Property(), + "engine_config": Property( + mutable=True, json_object=PostgreSQLDatabaseConfigOptions + ), + "private_network": Property( + mutable=True, json_object=DatabasePrivateNetwork, nullable=True + ), + } + + @property + def credentials(self): + """ + Display the root username and password for an accessible Managed PostgreSQL Database. + The Database must have an active status to perform this command. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-postgre-sql-instance-credentials + + :returns: MappedObject containing credntials for this DB + :rtype: MappedObject + """ + + if not hasattr(self, "_credentials"): + resp = self._client.get( + "{}/credentials".format(PostgreSQLDatabase.api_endpoint), + model=self, + ) + self._set("_credentials", MappedObject(**resp)) + + return self._credentials + + @property + def ssl(self): + """ + Display the SSL CA certificate for an accessible Managed PostgreSQL Database. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-databases-postgresql-instance-ssl + + :returns: MappedObject containing SSL CA certificate for this DB + :rtype: MappedObject + """ + + if not hasattr(self, "_ssl"): + resp = self._client.get( + "{}/ssl".format(PostgreSQLDatabase.api_endpoint), model=self + ) + self._set("_ssl", MappedObject(**resp)) + + return self._ssl + + def credentials_reset(self): + """ + Reset the root password for a Managed PostgreSQL Database. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-postgre-sql-instance-credentials-reset + + :returns: Response from the API call to reset credentials + :rtype: dict + """ + + self.invalidate() + + return self._client.post( + "{}/credentials/reset".format(PostgreSQLDatabase.api_endpoint), + model=self, + ) + + def patch(self): + """ + Apply security patches and updates to the underlying operating system of the Managed PostgreSQL Database. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-databases-postgre-sql-instance-patch + + :returns: Response from the API call to apply security patches + :rtype: dict + """ + + self.invalidate() + + return self._client.post( + "{}/patch".format(PostgreSQLDatabase.api_endpoint), model=self + ) + + def invalidate(self): + """ + Clear out cached properties. 
+ """ + + for attr in ["_ssl", "_credentials"]: + if hasattr(self, attr): + delattr(self, attr) + + Base.invalidate(self) + + def suspend(self): + """ + Suspend a PostgreSQL Managed Database, releasing idle resources and keeping only necessary data. + + API documentation: https://techdocs.akamai.com/linode-api/reference/suspend-databases-postgre-sql-instance + """ + self._client.post( + "{}/suspend".format(PostgreSQLDatabase.api_endpoint), model=self + ) + + return self.invalidate() + + def resume(self): + """ + Resume a suspended PostgreSQL Managed Database. + + API documentation: https://techdocs.akamai.com/linode-api/reference/resume-databases-postgre-sql-instance + """ + self._client.post( + "{}/resume".format(PostgreSQLDatabase.api_endpoint), model=self + ) + + return self.invalidate() + + +ENGINE_TYPE_TRANSLATION = { + "mysql": MySQLDatabase, + "postgresql": PostgreSQLDatabase, +} + + +class Database(Base): + """ + A generic Database instance. + + Note: This class does not have a corresponding GET endpoint. For detailed information + about the database, use the .instance() property method instead. + """ + + api_endpoint = "/databases/instances/{id}" + + properties = { + "id": Property(), + "label": Property(), + "allow_list": Property(unordered=True), + "cluster_size": Property(), + "created": Property(), + "encrypted": Property(), + "engine": Property(), + "hosts": Property(), + "instance_uri": Property(), + "region": Property(), + "status": Property(), + "type": Property(), + "fork": Property(), + "updated": Property(), + "updates": Property(), + "version": Property(), + "private_network": Property( + json_object=DatabasePrivateNetwork, nullable=True + ), + } + + @property + def instance(self): + """ + Returns the underlying database object for the corresponding database + engine. This is useful for performing operations on generic databases. 
+ + The following is an example of printing credentials for all databases regardless of engine:: + + client = LinodeClient(TOKEN) + + databases = client.database.instances() + + for db in databases: + print(f"{db.hosts.primary}: {db.instance.credentials.username} {db.instance.credentials.password}") + """ + + if not hasattr(self, "_instance"): + if self.engine not in ENGINE_TYPE_TRANSLATION: + return None + + self._set( + "_instance", + ENGINE_TYPE_TRANSLATION[self.engine](self._client, self.id), + ) + + return self._instance + + # Since this class doesn't have a corresponding GET endpoint, this prevents an accidental call to the nonexisting endpoint. + def _api_get(self): + return diff --git a/linode_api4/objects/dbase.py b/linode_api4/objects/dbase.py new file mode 100644 index 000000000..b6e288769 --- /dev/null +++ b/linode_api4/objects/dbase.py @@ -0,0 +1,27 @@ +from linode_api4.objects import Base + + +class DerivedBase(Base): + """ + The DerivedBase class holds information about an object who belongs to another object + (for example, a disk belongs to a linode). These objects have their own endpoints, + but they are below another object in the hierarchy (i.e. 
/linodes/lnde_123/disks/disk_123) + """ + + derived_url_path = "" # override in child classes + parent_id_name = "parent_id" # override in child classes + + def __init__(self, client, id, parent_id, json={}): + self._set(type(self).parent_id_name, parent_id) + + Base.__init__(self, client, id, json=json) + + @classmethod + def _api_get_derived(cls, parent, client): + base_url = "{}/{}".format( + type(parent).api_endpoint, cls.derived_url_path + ) + + return client._get_objects( + base_url, cls, model=parent, parent_id=parent.id + ) diff --git a/linode_api4/objects/domain.py b/linode_api4/objects/domain.py new file mode 100644 index 000000000..8ce7a5ee4 --- /dev/null +++ b/linode_api4/objects/domain.py @@ -0,0 +1,160 @@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.objects import Base, DerivedBase, Property + + +class DomainRecord(DerivedBase): + """ + A single record on a Domain. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-domain-record + """ + + api_endpoint = "/domains/{domain_id}/records/{id}" + derived_url_path = "records" + parent_id_name = "domain_id" + + properties = { + "id": Property(identifier=True), + "domain_id": Property(identifier=True), + "type": Property(), + "name": Property(mutable=True), + "target": Property(mutable=True), + "priority": Property(mutable=True), + "weight": Property(mutable=True), + "port": Property(mutable=True), + "service": Property(mutable=True), + "protocol": Property(mutable=True), + "ttl_sec": Property(mutable=True), + "tag": Property(mutable=True), + "created": Property(), + "updated": Property(), + } + + +class Domain(Base): + """ + A single Domain that you have registered in Linodeโ€™s DNS Manager. + Linode is not a registrar, and in order for this Domain record to work + you must own the domain and point your registrar at Linodeโ€™s nameservers. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-domain + """ + + api_endpoint = "/domains/{id}" + properties = { + "id": Property(identifier=True), + "domain": Property(mutable=True), + "group": Property(mutable=True), + "description": Property(mutable=True), + "status": Property(mutable=True), + "soa_email": Property(mutable=True), + "retry_sec": Property(mutable=True), + "master_ips": Property(mutable=True, unordered=True), + "axfr_ips": Property(mutable=True, unordered=True), + "expire_sec": Property(mutable=True), + "refresh_sec": Property(mutable=True), + "ttl_sec": Property(mutable=True), + "records": Property(derived_class=DomainRecord), + "type": Property(mutable=True), + "tags": Property(mutable=True, unordered=True), + } + + def record_create(self, record_type, **kwargs): + """ + Adds a new Domain Record to the zonefile this Domain represents. + Each domain can have up to 12,000 active records. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-domain-record + + :param record_type: The type of Record this is in the DNS system. Can be one of: + A, AAAA, NS, MX, CNAME, TXT, SRV, PTR, CAA. + :type: record_type: str + + :param kwargs: Additional optional parameters for creating a domain record. Valid parameters + are: name, target, priority, weight, port, service, protocol, ttl_sec. Descriptions + of these parameters can be found in the API Documentation above. 
+ :type: record_type: dict + + :returns: The newly created Domain Record + :rtype: DomainRecord + """ + + params = { + "type": record_type, + } + params.update(kwargs) + + result = self._client.post( + "{}/records".format(Domain.api_endpoint), model=self, data=params + ) + self.invalidate() + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response creating domain record!", json=result + ) + + zr = DomainRecord(self._client, result["id"], self.id, result) + return zr + + def zone_file_view(self): + """ + Returns the zone file for the last rendered zone for the specified domain. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-domain-zone + + :returns: The zone file for the last rendered zone for the specified domain in the form + of a list of the lines of the zone file. + :rtype: List[str] + """ + + result = self._client.get( + "{}/zone-file".format(self.api_endpoint), model=self + ) + + return result["zone_file"] + + def clone(self, domain: str): + """ + Clones a Domain and all associated DNS records from a Domain that is registered in Linodeโ€™s DNS manager. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-clone-domain + + :param domain: The new domain for the clone. Domain labels cannot be longer + than 63 characters and must conform to RFC1035. Domains must be + unique on Linodeโ€™s platform, including across different Linode + accounts; there cannot be two Domains representing the same domain. + :type: domain: str + """ + params = {"domain": domain} + + result = self._client.post( + "{}/clone".format(self.api_endpoint), model=self, data=params + ) + + return Domain(self, result["id"], result) + + def domain_import(self, domain, remote_nameserver): + """ + Imports a domain zone from a remote nameserver. 
Your nameserver must + allow zone transfers (AXFR) from the following IPs: + - 96.126.114.97 + - 96.126.114.98 + - 2600:3c00::5e + = 2600:3c00::5f + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-import-domain + + :param domain: The domain to import. + :type: domain: str + + :param remote_nameserver: The remote nameserver that allows zone transfers (AXFR). + :type: remote_nameserver: str + """ + + params = { + "domain": domain.domain if isinstance(domain, Domain) else domain, + "remote_nameserver": remote_nameserver, + } + + self._client.post("/domains/import", model=self, data=params) diff --git a/linode_api4/objects/filtering.py b/linode_api4/objects/filtering.py new file mode 100644 index 000000000..3616eb505 --- /dev/null +++ b/linode_api4/objects/filtering.py @@ -0,0 +1,251 @@ +""" +.. module:: linode + +Collections returned by the :any:`LinodeClient` can be filtered using a +SQLAlchemy-like syntax. When calling any "get" method of the :any:`LinodeClient` +class of one of its groups, any number of filters may be passed in as boolean +comparisons between attributes of the model returned by the collection. + +When filtering on API responses for list endpoints, you will first need +to import the corresponding object class. 
+For example, to filter on instances you must first import :any:`Instance`:: + + from linode_api4 import Instance + +For example, calling :any:`instances` returns a list of :any:`Instance` +objects, so we can use properties of :any:`Instance` to filter the results:: + + # returns all Instances in the "prod" group + client.linode.instances(Instance.group == "prod") + +You can use any boolean comparisons when filtering collections:: + + # returns all Instances _not_ in us-east-1a + client.linode.instances(Instance.region != "us-east-1a") + +You can combine filters to be even more specific - by default all filters are +considered:: + + # returns all Instances in the "prod" group that are in us-east-1a + client.linode.instances(Instance.group == "prod", + Instance.region == "us-east-1a") + +If you need to combine the results of two filters, you can use :any:`or_` to define +this relationship:: + + # returns all Instances in either the "prod" or "staging" groups + client.linode.instances(or_(Instance.group == "prod", + Instance.group == "staging")) + +:any:`and_` is also available in case you need to do deeply-nested comparisons:: + + # returns all Instances in the group "staging" and any Instances in the "prod" + # group that are located in "us-east-1a" + client.linode.instances(or_(Instance.group == "staging", + and_(Instance.group == "prod", + Instance.region == "us-east-1a"))) + +""" + + +def or_(a, b): + """ + Combines two :any:`Filters` with an "or" operation, matching + any results that match any of the given filters. + + :param a: The first filter to consider. + :type a: Filter + :param b: The second filter to consider. + :type b: Filter + + :returns: A filter that matches either a or b + :rtype: Filter + """ + if not isinstance(a, Filter) or not isinstance(b, Filter): + raise TypeError + return a.__or__(b) + + +def and_(a, b): + """ + Combines two :any:`Filters` with an "and" operation, matching + any results that match both of the given filters.
+ + :param a: The first filter to consider. + :type a: Filter + :param b: The second filter to consider. + :type b: Filter + + :returns: A filter that matches both a and b + :rtype: Filter + """ + return a.__and__(b) + + +def order_by(field, desc=False): + """ + Allows ordering of results. You may only ever order a collection's results + once in a given request. For example:: + + # sort results by Instances group + client.linode.instances(order_by(Instance.group)) + + :param field: The field to order results by. Must be a filterable attribute + of the model. + :type field: FilterableAttribute + :param desc: If True, return results in descending order. Defaults to False + :type desc: bool + + :returns: A filter that will order results as requested. + :rtype: Filter + """ + return Filter({}).order_by(field, desc) + + +def limit(amount): + """ + Allows limiting of results in a collection. You may only ever apply a limit + once per request. For example:: + + # returns my first 5 Instances + client.linode.instances(limit(5)) + + :param amount: The number of results to return. + :type amount: int + + :returns: A filter that will limit the number of results returned. + :rtype: Filter + """ + return Filter({}).limit(amount) + + +class Filter: + """ + A Filter represents a comparison to send to the API. These should not be + constructed normally, but instead should be returned from comparisons + between class attributes of filterable classes (see above). Filters can + be combined with :any:`and_` and :any:`or_`. 
+ """ + + def __init__(self, dct): + self.dct = dct + + def __or__(self, other): + if not isinstance(other, Filter): + raise TypeError("You can only or Filter types!") + if "+or" in self.dct: + return Filter({"+or": self.dct["+or"] + [other.dct]}) + else: + return Filter({"+or": [self.dct, other.dct]}) + + def __and__(self, other): + if not isinstance(other, Filter): + raise TypeError("You can only and Filter types!") + if "+and" in self.dct: + return Filter({"+and": self.dct["+and"] + [other.dct]}) + else: + return Filter({"+and": [self.dct, other.dct]}) + + def order_by(self, field, desc=False): + # we can't include two order_bys + if "+order_by" in self.dct: + raise AssertionError("You may only order by once!") + + if not isinstance(field, FilterableAttribute): + raise TypeError("Can only order by filterable attributes!") + + self.dct["+order_by"] = field.name + if desc: + self.dct["+order"] = "desc" + + return self + + def limit(self, limit): + # we can't limit twice + if "+limit" in self.dct: + raise AssertionError("You may only limit once!") + + if not type(limit) == int: + raise TypeError("Limit must be an int!") + + self.dct["+limit"] = limit + + return self + + +class FilterableAttribute: + def __init__(self, name): + self.name = name + + def __eq__(self, other): + return Filter({self.name: other}) + + def __ne__(self, other): + return Filter({self.name: {"+neq": other}}) + + # "in" evaluates the return value - have to use + # type.contains instead + def contains(self, other): + return Filter({self.name: {"+contains": other}}) + + def __gt__(self, other): + return Filter({self.name: {"+gt": other}}) + + def __lt__(self, other): + return Filter({self.name: {"+lt": other}}) + + def __ge__(self, other): + return Filter({self.name: {"+gte": other}}) + + def __le__(self, other): + return Filter({self.name: {"+lte": other}}) + + +class NonFilterableAttribute: + def __init__(self, clsname, atrname): + self.clsname = clsname + self.atrname = atrname + + def 
__eq__(self, other): + raise AttributeError( + "{} cannot be filtered by {}".format(self.clsname, self.atrname) + ) + + def __ne__(self, other): + raise AttributeError( + "{} cannot be filtered by {}".format(self.clsname, self.atrname) + ) + + def contains(self, other): + raise AttributeError( + "{} cannot be filtered by {}".format(self.clsname, self.atrname) + ) + + def __gt__(self, other): + raise AttributeError( + "{} cannot be filtered by {}".format(self.clsname, self.atrname) + ) + + def __lt__(self, other): + raise AttributeError( + "{} cannot be filtered by {}".format(self.clsname, self.atrname) + ) + + def __ge__(self, other): + raise AttributeError( + "{} cannot be filtered by {}".format(self.clsname, self.atrname) + ) + + def __le__(self, other): + raise AttributeError( + "{} cannot be filtered by {}".format(self.clsname, self.atrname) + ) + + +class FilterableMetaclass(type): + def __init__(cls, name, bases, dct): + if hasattr(cls, "properties"): + for key in cls.properties.keys(): + setattr(cls, key, FilterableAttribute(key)) + + super().__init__(name, bases, dct) diff --git a/linode_api4/objects/image.py b/linode_api4/objects/image.py new file mode 100644 index 000000000..50dc23f74 --- /dev/null +++ b/linode_api4/objects/image.py @@ -0,0 +1,123 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +from linode_api4.objects import Base, Property, Region +from linode_api4.objects.serializable import JSONObject, StrEnum + + +class ReplicationStatus(StrEnum): + """ + The Enum class represents image replication status. + """ + + pending_replication = "pending replication" + pending_deletion = "pending deletion" + available = "available" + creating = "creating" + pending = "pending" + replicating = "replicating" + + +@dataclass +class ImageRegion(JSONObject): + """ + The region and status of an image replica. 
+ """ + + include_none_values = True + + region: str = "" + status: Optional[ReplicationStatus] = None + + +@dataclass +class ImageSharingSharedWith(JSONObject): + """ + Data representing who an Image has been shared with. + """ + + sharegroup_count: Optional[int] = None + sharegroup_list_url: Optional[str] = None + + +@dataclass +class ImageSharingSharedBy(JSONObject): + """ + Data representing who shared an Image. + """ + + sharegroup_id: Optional[int] = None + sharegroup_uuid: Optional[str] = None + sharegroup_label: Optional[str] = None + source_image_id: Optional[str] = None + + +@dataclass +class ImageSharing(JSONObject): + """ + The Image Sharing status of an Image. + """ + + shared_with: Optional[ImageSharingSharedWith] = None + shared_by: Optional[ImageSharingSharedBy] = None + + +class Image(Base): + """ + An Image is something a Linode Instance or Disk can be deployed from. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-image + """ + + api_endpoint = "/images/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "description": Property(mutable=True), + "eol": Property(is_datetime=True), + "expiry": Property(is_datetime=True), + "status": Property(), + "created": Property(is_datetime=True), + "created_by": Property(), + "updated": Property(is_datetime=True), + "type": Property(), + "is_public": Property(), + "is_shared": Property(), + "vendor": Property(), + "size": Property(), + "deprecated": Property(), + "capabilities": Property( + unordered=True, + ), + "tags": Property(mutable=True, unordered=True), + "total_size": Property(), + "regions": Property(json_object=ImageRegion, unordered=True), + "image_sharing": Property(json_object=ImageSharing), + } + + def replicate(self, regions: Union[List[str], List[Region]]): + """ + Replicate the image to other regions. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-replicate-image + + :param regions: A list of regions that the customer wants to replicate this image in. + At least one valid region is required and only core regions allowed. + Existing images in the regions not passed will be removed. + :type regions: List[str] + """ + params = { + "regions": [ + region.id if isinstance(region, Region) else region + for region in regions + ] + } + + result = self._client.post( + "{}/regions".format(self.api_endpoint), model=self, data=params + ) + + # The replicate endpoint returns the updated Image, so we can use this + # as an opportunity to refresh the object + self._populate(result) diff --git a/linode_api4/objects/image_share_group.py b/linode_api4/objects/image_share_group.py new file mode 100644 index 000000000..6c75fc7f9 --- /dev/null +++ b/linode_api4/objects/image_share_group.py @@ -0,0 +1,344 @@ +__all__ = [ + "ImageShareGroupImageToAdd", + "ImageShareGroupImagesToAdd", + "ImageShareGroupImageToUpdate", + "ImageShareGroupMemberToAdd", + "ImageShareGroupMemberToUpdate", + "ImageShareGroup", + "ImageShareGroupToken", +] +from dataclasses import dataclass +from typing import List, Optional + +from linode_api4.objects import Base, MappedObject, Property +from linode_api4.objects.serializable import JSONObject + + +@dataclass +class ImageShareGroupImageToAdd(JSONObject): + """ + Data representing an Image to add to an Image Share Group. + """ + + id: str + label: Optional[str] = None + description: Optional[str] = None + + def to_dict(self): + d = {"id": self.id} + if self.label is not None: + d["label"] = self.label + if self.description is not None: + d["description"] = self.description + return d + + +@dataclass +class ImageShareGroupImagesToAdd(JSONObject): + """ + Data representing a list of Images to add to an Image Share Group. 
+ """ + + images: List[ImageShareGroupImageToAdd] + + +@dataclass +class ImageShareGroupImageToUpdate(JSONObject): + """ + Data to update an Image shared in an Image Share Group. + """ + + image_share_id: str + label: Optional[str] = None + description: Optional[str] = None + + def to_dict(self): + d = {"image_share_id": self.image_share_id} + if self.label is not None: + d["label"] = self.label + if self.description is not None: + d["description"] = self.description + return d + + +@dataclass +class ImageShareGroupMemberToAdd(JSONObject): + """ + Data representing a Member to add to an Image Share Group. + """ + + token: str + label: str + + +@dataclass +class ImageShareGroupMemberToUpdate(JSONObject): + """ + Data to update a Member in an Image Share Group. + """ + + token_uuid: str + label: str + + +class ImageShareGroup(Base): + """ + An Image Share Group is a group to share private images with other users. This class is intended + to be used by a Producer of an Image Share Group, and not a Consumer. + + NOTE: Private Image Sharing features are in beta and may not be generally available. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroup + """ + + api_endpoint = "/images/sharegroups/{id}" + + properties = { + "id": Property(identifier=True), + "uuid": Property(), + "label": Property(mutable=True), + "description": Property(mutable=True), + "is_suspended": Property(), + "images_count": Property(), + "members_count": Property(), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "expiry": Property(is_datetime=True), + } + + def add_images(self, images: ImageShareGroupImagesToAdd): + """ + Add private images to be shared in the Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-sharegroup-images + + :param images: A list of Images to share in the Image Share Group, formatted in JSON. 
+ :type images: ImageShareGroupImagesToAdd + + :returns: A list of the new Image shares. + :rtype: List of MappedObject + """ + params = {"images": [img.to_dict() for img in images.images]} + + result = self._client.post( + "{}/images".format(self.api_endpoint), model=self, data=params + ) + + # Sync this object to reflect the new images added to the share group. + self.invalidate() + + # Expect result to be a dict with a 'data' key + image_list = result.get("data", []) + return [MappedObject(**item) for item in image_list] + + def get_image_shares(self): + """ + Retrieves a list of images shared in the Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroup-images + + :returns: A list of the Image shares. + :rtype: List of MappedObject + """ + result = self._client.get( + "{}/images".format(self.api_endpoint), + model=self, + ) + image_list = result.get("data", []) + return [MappedObject(**item) for item in image_list] + + def update_image_share(self, image: ImageShareGroupImageToUpdate): + """ + Update the label and description of an Image shared in the Image Share Group. + Note that the ID provided in the image parameter must be the shared ID of an + Image already shared in the Image Share Group, not the private ID. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/put-sharegroup-imageshare + + :param image: The Image to update, formatted in JSON. + :type image: ImageShareGroupImageToUpdate + + :returns: The updated Image share. + :rtype: MappedObject + """ + params = image.to_dict() + + result = self._client.put( + "{}/images/{}".format(self.api_endpoint, image.image_share_id), + model=self, + data=params, + ) + + return MappedObject(**result) + + def revoke_image_share(self, image_share_id: str): + """ + Revoke an Image shared in the Image Share Group. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-sharegroup-imageshare + + :param image_share_id: The ID of the Image share to revoke. + :type image_share_id: str + """ + self._client.delete( + "{}/images/{}".format(self.api_endpoint, image_share_id), model=self + ) + + # Sync this object to reflect the revoked image share. + self.invalidate() + + def add_member(self, member: ImageShareGroupMemberToAdd): + """ + Add a Member to the Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-sharegroup-members + + :param member: The Member to add, formatted in JSON. + :type member: ImageShareGroupMemberToAdd + + :returns: The new Member. + :rtype: MappedObject + """ + params = { + "token": member.token, + "label": member.label, + } + + result = self._client.post( + "{}/members".format(self.api_endpoint), model=self, data=params + ) + + # Sync this object to reflect the new member added to the share group. + self.invalidate() + + return MappedObject(**result) + + def get_members(self): + """ + Retrieves a list of members in the Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroup-members + + :returns: List of members. + :rtype: List of MappedObject + """ + result = self._client.get( + "{}/members".format(self.api_endpoint), + model=self, + ) + member_list = result.get("data", []) + return [MappedObject(**item) for item in member_list] + + def get_member(self, token_uuid: str): + """ + Get a Member in the Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroup-member-token + + :param token_uuid: The UUID of the token corresponding to the Member to retrieve. + :type token_uuid: str + + :returns: The requested Member. 
+ :rtype: MappedObject + """ + result = self._client.get( + "{}/members/{}".format(self.api_endpoint, token_uuid), model=self + ) + + return MappedObject(**result) + + def update_member(self, member: ImageShareGroupMemberToUpdate): + """ + Update the label of a Member in the Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/put-sharegroup-member-token + + :param member: The Member to update, formatted in JSON. + :type member: ImageShareGroupMemberToUpdate + + :returns: The updated Member. + :rtype: MappedObject + """ + params = { + "label": member.label, + } + + result = self._client.put( + "{}/members/{}".format(self.api_endpoint, member.token_uuid), + model=self, + data=params, + ) + + return MappedObject(**result) + + def remove_member(self, token_uuid: str): + """ + Remove a Member from the Image Share Group. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-sharegroup-member-token + + :param token_uuid: The UUID of the token corresponding to the Member to remove. + :type token_uuid: str + """ + self._client.delete( + "{}/members/{}".format(self.api_endpoint, token_uuid), model=self + ) + + # Sync this object to reflect the removed member. + self.invalidate() + + +class ImageShareGroupToken(Base): + """ + An Image Share Group Token is a token that can be used to access the Images shared in an Image Share Group. + This class is intended to be used by a Consumer of an Image Share Group, and not a Producer. + + NOTE: Private Image Sharing features are in beta and may not be generally available. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroup-token + """ + + api_endpoint = "/images/sharegroups/tokens/{token_uuid}" + id_attribute = "token_uuid" + properties = { + "token_uuid": Property(identifier=True), + "status": Property(), + "label": Property(mutable=True), + "valid_for_sharegroup_uuid": Property(), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "expiry": Property(is_datetime=True), + "sharegroup_uuid": Property(), + "sharegroup_label": Property(), + } + + def get_sharegroup(self): + """ + Gets details about the Image Share Group that this token provides access to. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroup-by-token + + :returns: The requested Image Share Group. + :rtype: MappedObject + """ + result = self._client.get( + "{}/sharegroup".format(self.api_endpoint), model=self + ) + + return MappedObject(**result) + + def get_images(self): + """ + Retrieves a paginated list of images shared in the Image Share Group that this token provides access to. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-sharegroup-images-by-token + + :returns: List of images. 
+ :rtype: List of MappedObject + """ + result = self._client.get( + "{}/sharegroup/images".format(self.api_endpoint), + model=self, + ) + image_list = result.get("data", []) + return [MappedObject(**item) for item in image_list] diff --git a/linode_api4/objects/linode.py b/linode_api4/objects/linode.py new file mode 100644 index 000000000..3ffe4b232 --- /dev/null +++ b/linode_api4/objects/linode.py @@ -0,0 +1,2328 @@ +import copy +import string +import sys +import warnings +from dataclasses import dataclass, field +from datetime import datetime +from enum import Enum +from os import urandom +from random import randint +from typing import Any, Dict, List, Optional, Union +from urllib import parse + +from linode_api4.common import load_and_validate_keys +from linode_api4.errors import UnexpectedResponseError +from linode_api4.objects.base import ( + Base, + MappedObject, + Property, + _flatten_request_body_recursive, +) +from linode_api4.objects.dbase import DerivedBase +from linode_api4.objects.filtering import FilterableAttribute +from linode_api4.objects.image import Image +from linode_api4.objects.linode_interfaces import ( + LinodeInterface, + LinodeInterfaceDefaultRouteOptions, + LinodeInterfacePublicOptions, + LinodeInterfacesSettings, + LinodeInterfaceVLANOptions, + LinodeInterfaceVPCOptions, +) +from linode_api4.objects.networking import ( + Firewall, + IPAddress, + IPv6Range, + VPCIPAddress, +) +from linode_api4.objects.nodebalancer import NodeBalancer +from linode_api4.objects.region import Region +from linode_api4.objects.serializable import JSONObject, StrEnum +from linode_api4.objects.vpc import VPC, VPCSubnet +from linode_api4.paginated_list import PaginatedList +from linode_api4.util import drop_null_keys, generate_device_suffixes + +PASSWORD_CHARS = string.ascii_letters + string.digits + string.punctuation +MIN_DEVICE_LIMIT = 8 +MB_PER_GB = 1024 +MAX_DEVICE_LIMIT = 64 + + +class InstanceDiskEncryptionType(StrEnum): + """ + InstanceDiskEncryptionType 
defines valid values for the + Instance(...).disk_encryption field. + + API Documentation: TODO + """ + + enabled = "enabled" + disabled = "disabled" + + +class Backup(DerivedBase): + """ + A Backup of a Linode Instance. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-backup + """ + + api_endpoint = "/linode/instances/{linode_id}/backups/{id}" + derived_url_path = "backups" + parent_id_name = "linode_id" + + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "duration": Property(), + "updated": Property(is_datetime=True), + "finished": Property(is_datetime=True), + "message": Property(), + "status": Property(volatile=True), + "type": Property(), + "linode_id": Property(identifier=True), + "label": Property(), + "configs": Property(), + "disks": Property(), + "region": Property(slug_relationship=Region), + "available": Property(), + } + + def restore_to(self, linode, **kwargs): + """ + Restores a Linodeโ€™s Backup to the specified Linode. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-restore-backup + + :param linode: The id of the Instance or the Instance to share the IPAddresses with. + This Instance will be able to bring up the given addresses. + :type: linode: int or Instance + + :param kwargs: A dict containing the The ID of the Linode to restore a Backup to and + a boolean that, if True, deletes all Disks and Configs on + the target Linode before restoring. + :type: kwargs: dict + + Example usage: + kwargs = { + "linode_id": 123, + "overwrite": true + } + + :returns: Returns true if the operation was successful + :rtype: bool + """ + + d = { + "linode_id": linode, + } + d.update(kwargs) + + self._client.post( + "{}/restore".format(Backup.api_endpoint), + model=self, + data=_flatten_request_body_recursive(d), + ) + return True + + +class Disk(DerivedBase): + """ + A Disk for the storage space on a Compute Instance. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-disk + """ + + api_endpoint = "/linode/instances/{linode_id}/disks/{id}" + derived_url_path = "disks" + parent_id_name = "linode_id" + + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "label": Property(mutable=True), + "size": Property(), + "status": Property(volatile=True), + "filesystem": Property(), + "updated": Property(is_datetime=True), + "linode_id": Property(identifier=True), + "disk_encryption": Property(), + } + + def duplicate(self): + """ + Copies a disk, byte-for-byte, into a new Disk belonging to the same Linode. The Linode must have enough + storage space available to accept a new Disk of the same size as this one or this operation will fail. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-clone-linode-disk + + :returns: A Disk object representing the cloned Disk + :rtype: Disk + """ + + d = self._client.post("{}/clone".format(Disk.api_endpoint), model=self) + + if not "id" in d: + raise UnexpectedResponseError( + "Unexpected response duplicating disk!", json=d + ) + + return Disk(self._client, d["id"], self.linode_id) + + def reset_root_password(self, root_password=None): + """ + Resets the password of a Disk you have permission to read_write. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-reset-disk-password + + :param root_password: The new root password for the OS installed on this Disk. The password must meet the complexity + strength validation requirements for a strong password. + :type: root_password: str + """ + rpass = root_password + if not rpass: + rpass = Instance.generate_root_password() + + params = { + "password": rpass, + } + + self._client.post( + "{}/password".format(Disk.api_endpoint), model=self, data=params + ) + + def resize(self, new_size): + """ + Resizes this disk. 
The Linode Instance this disk belongs to must have + sufficient space available to accommodate the new size, and must be + offline. + + **NOTE** If resizing a disk down, the filesystem on the disk must still + fit on the new disk size. You may need to resize the filesystem on the + disk first before performing this action. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-resize-disk + + :param new_size: The intended new size of the disk, in MB + :type new_size: int + + :returns: True if the resize was initiated successfully. + :rtype: bool + """ + self._client.post( + "{}/resize".format(Disk.api_endpoint), + model=self, + data={"size": new_size}, + ) + + return True + + +class Kernel(Base): + """ + The primary component of every Linux system. The kernel interfaces + with the systemโ€™s hardware, and it controls the operating systemโ€™s core functionality. + + Your Compute Instance is capable of running one of three kinds of kernels: + + - Upstream kernel (or distribution-supplied kernel): This kernel is maintained + and provided by your Linux distribution. A major benefit of this kernel is that the + distribution was designed with this kernel in mind and all updates are managed through + the distributions package management system. It also may support features not present + in the Linode kernel (for example, SELinux). + + - Linode kernel: Linode also maintains kernels that can be used on a Compute Instance. + If selected, these kernels are provided to your Compute Instance at boot + (not directly installed on your system). The Current Kernels page displays a + list of all the available Linode kernels. + + - Custom-compiled kernel: A kernel that you compile from source. Compiling a kernel + can let you use features not available in the upstream or Linode kernels, but it takes longer + to compile the kernel from source than to download it from your package manager. 
For more + information on custom compiled kernels, review our guides for Debian, Ubuntu, and CentOS. + + .. note:: + The ``xen`` property is deprecated and is no longer returned by the API. + It is maintained for backward compatibility only. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-kernel + """ + + api_endpoint = "/linode/kernels/{id}" + properties = { + "created": Property(is_datetime=True), + "deprecated": Property(), + "description": Property(), + "id": Property(identifier=True), + "kvm": Property(), + "label": Property(), + "updates": Property(), + "version": Property(), + "architecture": Property(), + "xen": Property(), + "built": Property(), + "pvops": Property(), + } + + def __getattribute__(self, name: str) -> object: + if name == "xen": + warnings.warn( + "The 'xen' property of Kernel is deprecated and is no longer " + "returned by the API. It is maintained for backward compatibility only.", + DeprecationWarning, + stacklevel=2, + ) + return super().__getattribute__(name) + + +class Type(Base): + """ + Linode Plan type to specify the resources available to a Linode Instance. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-type + """ + + api_endpoint = "/linode/types/{id}" + properties = { + "disk": Property(), + "id": Property(identifier=True), + "label": Property(), + "network_out": Property(), + "price": Property(), + "region_prices": Property(), + "addons": Property(), + "memory": Property(), + "transfer": Property(), + "vcpus": Property(), + "gpus": Property(), + "successor": Property(), + "accelerated_devices": Property(), + # type_class is populated from the 'class' attribute of the returned JSON + } + + def _populate(self, json): + """ + Allows changing the name "class" in JSON to "type_class" in python + """ + + super()._populate(json) + + if json is not None and "class" in json: + setattr(self, "type_class", json["class"]) + else: + setattr(self, "type_class", None) + + # allow filtering on this converted type + type_class = FilterableAttribute("class") + + +@dataclass +class ConfigInterfaceIPv4(JSONObject): + """ + ConfigInterfaceIPv4 represents the IPv4 configuration of a VPC interface. + """ + + vpc: str = "" + nat_1_1: str = "" + + +@dataclass +class ConfigInterfaceIPv6SLAACOptions(JSONObject): + """ + ConfigInterfaceIPv6SLAACOptions is used to set a single IPv6 SLAAC configuration of a VPC interface. + """ + + range: str = "" + + +@dataclass +class ConfigInterfaceIPv6RangeOptions(JSONObject): + """ + ConfigInterfaceIPv6RangeOptions is used to set a single IPv6 range configuration of a VPC interface. + """ + + range: str = "" + + +@dataclass +class ConfigInterfaceIPv6Options(JSONObject): + """ + ConfigInterfaceIPv6Options is used to set the IPv6 configuration of a VPC interface. 
+ """ + + slaac: List[ConfigInterfaceIPv6SLAACOptions] = field( + default_factory=lambda: [] + ) + ranges: List[ConfigInterfaceIPv6RangeOptions] = field( + default_factory=lambda: [] + ) + is_public: bool = False + + +@dataclass +class ConfigInterfaceIPv6SLAAC(JSONObject): + """ + ConfigInterfaceIPv6SLAAC represents a single SLAAC address under a VPC interface's IPv6 configuration. + """ + + put_class = ConfigInterfaceIPv6SLAACOptions + + range: str = "" + address: str = "" + + +@dataclass +class ConfigInterfaceIPv6Range(JSONObject): + """ + ConfigInterfaceIPv6Range represents a single IPv6 address under a VPC interface's IPv6 configuration. + """ + + put_class = ConfigInterfaceIPv6RangeOptions + + range: str = "" + + +@dataclass +class ConfigInterfaceIPv6(JSONObject): + """ + ConfigInterfaceIPv6 represents the IPv6 configuration of a VPC interface. + """ + + put_class = ConfigInterfaceIPv6Options + + slaac: List[ConfigInterfaceIPv6SLAAC] = field(default_factory=lambda: []) + ranges: List[ConfigInterfaceIPv6Range] = field(default_factory=lambda: []) + is_public: bool = False + + +class NetworkInterface(DerivedBase): + """ + This class represents a Configuration Profile's network interface object. + NOTE: This class cannot be used for the `interfaces` attribute on Config + POST and PUT requests. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-config-interface + """ + + api_endpoint = ( + "/linode/instances/{instance_id}/configs/{config_id}/interfaces/{id}" + ) + derived_url_path = "interfaces" + parent_id_name = "config_id" + + properties = { + "id": Property(identifier=True), + "purpose": Property(), + "label": Property(), + "ipam_address": Property(), + "primary": Property(mutable=True), + "active": Property(), + "vpc_id": Property(id_relationship=VPC), + "subnet_id": Property(), + "ipv4": Property(mutable=True, json_object=ConfigInterfaceIPv4), + "ipv6": Property(mutable=True, json_object=ConfigInterfaceIPv6), + "ip_ranges": Property(mutable=True), + } + + def __init__(self, client, id, parent_id, instance_id=None, json=None): + """ + We need a special constructor here because this object's parent + has a parent itself. + """ + if not instance_id and not isinstance(parent_id, tuple): + raise ValueError( + "ConfigInterface must either be created with a instance_id or a tuple of " + "(config_id, instance_id) for parent_id!" + ) + + if isinstance(parent_id, tuple): + instance_id = parent_id[1] + parent_id = parent_id[0] + + DerivedBase.__init__(self, client, id, parent_id, json=json) + + self._set("instance_id", instance_id) + + def __repr__(self): + return f"Interface: {self.purpose} {self.id}" + + @property + def subnet(self) -> VPCSubnet: + """ + Get the subnet this VPC is referencing. + + :returns: The VPCSubnet associated with this interface. + :rtype: VPCSubnet + """ + return VPCSubnet(self._client, self.subnet_id, self.vpc_id) + + +@dataclass +class InstancePlacementGroupAssignment(JSONObject): + """ + Represents an assignment between an instance and a Placement Group. + This is intended to be used when creating, cloning, and migrating + instances. + """ + + id: int + compliant_only: bool = False + + +@dataclass +class ConfigInterface(JSONObject): + """ + Represents a single interface in a Configuration Profile. 
+ This class only contains data about a config interface. + If you would like to access a config interface directly, + consider using :any:`NetworkInterface`. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-config-interface + """ + + purpose: str = "public" + + # Public/VPC-specific + primary: Optional[bool] = None + + # VLAN-specific + label: Optional[str] = None + ipam_address: Optional[str] = None + + # VPC-specific + vpc_id: Optional[int] = None + subnet_id: Optional[int] = None + + ipv4: Optional[Union[ConfigInterfaceIPv4, Dict[str, Any]]] = None + ipv6: Optional[Union[ConfigInterfaceIPv6, Dict[str, Any]]] = None + + ip_ranges: Optional[List[str]] = None + + # Computed + id: int = 0 + + def __repr__(self): + return f"Interface: {self.purpose}" + + def _serialize(self, is_put: bool = False): + purpose_formats = { + "public": {"purpose": "public", "primary": self.primary}, + "vlan": { + "purpose": "vlan", + "label": self.label, + "ipam_address": self.ipam_address, + }, + "vpc": { + "purpose": "vpc", + "primary": self.primary, + "subnet_id": self.subnet_id, + "ipv4": self.ipv4, + "ipv6": self.ipv6, + "ip_ranges": self.ip_ranges, + }, + } + + if self.purpose not in purpose_formats: + raise ValueError( + f"Unknown interface purpose: {self.purpose}", + ) + + return _flatten_request_body_recursive( + { + k: v + for k, v in purpose_formats[self.purpose].items() + if v is not None + }, + is_put=is_put, + ) + + +class Config(DerivedBase): + """ + A Configuration Profile for a Linode Instance. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-config + """ + + api_endpoint = "/linode/instances/{linode_id}/configs/{id}" + derived_url_path = "configs" + parent_id_name = "linode_id" + + properties = { + "id": Property(identifier=True), + "linode_id": Property(identifier=True), + "helpers": Property(mutable=True), + "created": Property(is_datetime=True), + "root_device": Property(mutable=True), + "kernel": Property(relationship=Kernel, mutable=True), + "devices": Property(), # TODO: mutable=True), + "initrd": Property(relationship=Disk), + "updated": Property(), + "comments": Property(mutable=True), + "label": Property(mutable=True), + "run_level": Property(mutable=True), + "virt_mode": Property(mutable=True), + "memory_limit": Property(mutable=True), + "interfaces": Property(mutable=True, json_object=ConfigInterface), + } + + @property + def network_interfaces(self): + """ + Returns the Network Interfaces for this Configuration Profile. + This differs from the `interfaces` field as each NetworkInterface + object is treated as its own API object. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-config-interfaces + """ + + return [ + NetworkInterface( + self._client, v.id, self.id, instance_id=self.linode_id + ) + for v in self.interfaces + ] + + def _populate(self, json): + """ + Map devices more nicely while populating. 
+ """ + if json is None or len(json) < 1: + return + + # needed here to avoid circular imports + from .volume import Volume # pylint: disable=import-outside-toplevel + + DerivedBase._populate(self, json) + + devices = {} + for device_index, device in json["devices"].items(): + if not device: + devices[device_index] = None + continue + + dev = None + if "disk_id" in device and device["disk_id"]: # this is a disk + dev = Disk.make_instance( + device["disk_id"], self._client, parent_id=self.linode_id + ) + else: + dev = Volume.make_instance( + device["volume_id"], self._client, parent_id=self.linode_id + ) + devices[device_index] = dev + + self._set("devices", MappedObject(**devices)) + + def _serialize(self, is_put: bool = False): + """ + Overrides _serialize to transform interfaces into json + """ + partial = DerivedBase._serialize(self, is_put=is_put) + interfaces = [] + + for c in self.interfaces: + if isinstance(c, ConfigInterface): + interfaces.append(c._serialize(is_put=is_put)) + else: + interfaces.append(c) + + partial["interfaces"] = interfaces + return partial + + def interface_create_public(self, primary=False) -> NetworkInterface: + """ + Creates a public interface for this Configuration Profile. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-linode-config-interface + + :param primary: Whether this interface is a primary interface. + :type primary: bool + + :returns: The newly created NetworkInterface. + :rtype: NetworkInterface + + """ + return self._interface_create({"purpose": "public", "primary": primary}) + + def interface_create_vlan( + self, label: str, ipam_address=None + ) -> NetworkInterface: + """ + Creates a VLAN interface for this Configuration Profile. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-linode-config-interface + + :param label: The label of the VLAN to associate this interface with. 
+ :type label: str + :param ipam_address: The IPAM address of this interface for the associated VLAN. + :type ipam_address: str + + :returns: The newly created NetworkInterface. + :rtype: NetworkInterface + """ + params = { + "purpose": "vlan", + "label": label, + } + if ipam_address is not None: + params["ipam_address"] = ipam_address + + return self._interface_create(params) + + def interface_create_vpc( + self, + subnet: Union[int, VPCSubnet], + primary=False, + ipv4: Union[Dict[str, Any], ConfigInterfaceIPv4] = None, + ipv6: Union[Dict[str, Any], ConfigInterfaceIPv6Options] = None, + ip_ranges: Optional[List[str]] = None, + ) -> NetworkInterface: + """ + Creates a VPC interface for this Configuration Profile. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-linode-config-interface + + :param subnet: The VPC subnet to associate this interface with. + :type subnet: int or VPCSubnet + :param primary: Whether this is a primary interface. + :type primary: bool + :param ipv4: The IPv4 configuration of the interface for the associated subnet. + :type ipv4: Dict or ConfigInterfaceIPv4 + :param ipv6: The IPv6 configuration of the interface for the associated subnet. + :type ipv6: Dict or ConfigInterfaceIPv6Options + :param ip_ranges: A list of IPs or IP ranges in the VPC subnet. + Packets to these CIDRs are routed through the + VPC network interface. + :type ip_ranges: List of str + + :returns: The newly created NetworkInterface. + :rtype: NetworkInterface + """ + params = { + "purpose": "vpc", + "subnet_id": subnet, + "primary": primary, + "ipv4": ipv4, + "ipv6": ipv6, + "ip_ranges": ip_ranges, + } + + return self._interface_create( + drop_null_keys(_flatten_request_body_recursive(params)) + ) + + def interface_reorder(self, interfaces: List[Union[int, NetworkInterface]]): + """ + Change the order of the interfaces for this Configuration Profile. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-linode-config-interfaces + + :param interfaces: A list of interfaces in the desired order. + :type interfaces: List of str or NetworkInterface + """ + ids = [ + v.id if isinstance(v, NetworkInterface) else v for v in interfaces + ] + + self._client.post( + "{}/interfaces/order".format(Config.api_endpoint), + model=self, + data={"ids": ids}, + ) + self.invalidate() + + def _interface_create(self, body: Dict[str, Any]) -> NetworkInterface: + """ + The underlying ConfigInterface creation API call. + """ + result = self._client.post( + "{}/interfaces".format(Config.api_endpoint), model=self, data=body + ) + self.invalidate() + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response creating Interface", json=result + ) + + i = NetworkInterface( + self._client, result["id"], self.id, self.linode_id, result + ) + return i + + +class MigrationType: + COLD = "cold" + WARM = "warm" + + +class InterfaceGeneration(StrEnum): + """ + A string enum representing which interface generation a Linode is using. + """ + + LEGACY_CONFIG = "legacy_config" + LINODE = "linode" + + +@dataclass +class UpgradeInterfacesResult(JSONObject): + """ + Contains information about an Linode Interface upgrade operation. + + NOTE: If dry_run is True, each returned interface will be of type Dict[str, Any]. + Otherwise, each returned interface will be of type LinodeInterface. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-upgrade-linode-interfaces + """ + + dry_run: bool = False + config_id: int = 0 + interfaces: List[Union[Dict[str, Any], LinodeInterface]] = field( + default_factory=list + ) + + +class Instance(Base): + """ + A Linode Instance. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-instance + """ + + api_endpoint = "/linode/instances/{id}" + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "group": Property(mutable=True), + "status": Property(volatile=True), + "created": Property(is_datetime=True), + "updated": Property(volatile=True, is_datetime=True), + "region": Property(slug_relationship=Region), + "alerts": Property(mutable=True), + "image": Property(slug_relationship=Image), + "disks": Property(derived_class=Disk), + "configs": Property(derived_class=Config), + "type": Property(slug_relationship=Type), + "backups": Property(mutable=True), + "ipv4": Property(unordered=True), + "ipv6": Property(), + "hypervisor": Property(), + "specs": Property(), + "tags": Property(mutable=True, unordered=True), + "host_uuid": Property(), + "watchdog_enabled": Property(mutable=True), + "has_user_data": Property(), + "disk_encryption": Property(), + "lke_cluster_id": Property(), + "capabilities": Property(unordered=True), + "interface_generation": Property(), + "maintenance_policy": Property(mutable=True), + "locks": Property(unordered=True), + } + + @property + def ips(self): + """ + The ips related collection is not normalized like the others, so we have to + make an ad-hoc object to return for its response + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-ips + + :returns: Information about the IP addresses assigned to this instance. 
+ :rtype: MappedObject + """ + if not hasattr(self, "_ips"): + result = self._client.get( + "{}/ips".format(Instance.api_endpoint), model=self + ) + + if not "ipv4" in result: + raise UnexpectedResponseError( + "Unexpected response loading IPs", json=result + ) + + v4pub = [] + for c in result["ipv4"]["public"]: + i = IPAddress(self._client, c["address"], c) + v4pub.append(i) + + v4pri = [] + for c in result["ipv4"]["private"]: + i = IPAddress(self._client, c["address"], c) + v4pri.append(i) + + shared_ips = [] + for c in result["ipv4"]["shared"]: + i = IPAddress(self._client, c["address"], c) + shared_ips.append(i) + + reserved = [] + for c in result["ipv4"]["reserved"]: + i = IPAddress(self._client, c["address"], c) + reserved.append(i) + + vpc = [ + VPCIPAddress.from_json(v) for v in result["ipv4"].get("vpc", []) + ] + + slaac = IPAddress( + self._client, + result["ipv6"]["slaac"]["address"], + result["ipv6"]["slaac"], + ) + link_local = IPAddress( + self._client, + result["ipv6"]["link_local"]["address"], + result["ipv6"]["link_local"], + ) + + ranges = [ + IPv6Range(self._client, r["range"]) + for r in result["ipv6"]["global"] + ] + + ips = MappedObject( + **{ + "ipv4": { + "public": v4pub, + "private": v4pri, + "shared": shared_ips, + "reserved": reserved, + "vpc": vpc, + }, + "ipv6": { + "slaac": slaac, + "link_local": link_local, + "ranges": ranges, + }, + } + ) + + self._set("_ips", ips) + + return self._ips + + @property + def available_backups(self): + """ + The backups response contains what backups are available to be restored. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-backups + + :returns: A List of the available backups for the Linode Instance. 
+ :rtype: List[Backup] + """ + if not hasattr(self, "_avail_backups"): + result = self._client.get( + "{}/backups".format(Instance.api_endpoint), model=self + ) + + if not "automatic" in result: + raise UnexpectedResponseError( + "Unexpected response loading available backups!", + json=result, + ) + + automatic = [] + for a in result["automatic"]: + cur = Backup(self._client, a["id"], self.id, a) + automatic.append(cur) + + snap = None + if result["snapshot"]["current"]: + snap = Backup( + self._client, + result["snapshot"]["current"]["id"], + self.id, + result["snapshot"]["current"], + ) + + psnap = None + if result["snapshot"]["in_progress"]: + psnap = Backup( + self._client, + result["snapshot"]["in_progress"]["id"], + self.id, + result["snapshot"]["in_progress"], + ) + + self._set( + "_avail_backups", + MappedObject( + **{ + "automatic": automatic, + "snapshot": { + "current": snap, + "in_progress": psnap, + }, + } + ), + ) + + return self._avail_backups + + def reset_instance_root_password(self, root_password=None): + """ + Resets the root password for this Linode. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-reset-linode-password + + :param root_password: The root userโ€™s password on this Linode. Linode passwords must + meet a password strength score requirement that is calculated internally + by the API. If the strength requirement is not met, you will receive a + Password does not meet strength requirement error. 
+ :type: root_password: str + """ + rpass = root_password + if not rpass: + rpass = Instance.generate_root_password() + + params = { + "root_pass": rpass, + } + + self._client.post( + "{}/password".format(Instance.api_endpoint), model=self, data=params + ) + + def transfer_year_month(self, year, month): + """ + Get per-linode transfer for specified month + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-transfer-by-year-month + + :param year: Numeric value representing the year to look up. + :type: year: int + + :param month: Numeric value representing the month to look up. + :type: month: int + + :returns: The network transfer statistics for the specified month. + :rtype: MappedObject + """ + + result = self._client.get( + "{}/transfer/{}/{}".format( + Instance.api_endpoint, + parse.quote(str(year)), + parse.quote(str(month)), + ), + model=self, + ) + + return MappedObject(**result) + + @property + def transfer(self): + """ + Get per-linode transfer + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-transfer + + :returns: The network transfer statistics for the current month. + :rtype: MappedObject + """ + if not hasattr(self, "_transfer"): + result = self._client.get( + "{}/transfer".format(Instance.api_endpoint), model=self + ) + + if not "used" in result: + raise UnexpectedResponseError( + "Unexpected response when getting Transfer Pool!" + ) + + mapped = MappedObject(**result) + + setattr(self, "_transfer", mapped) + + return self._transfer + + @property + def placement_group(self) -> Optional["PlacementGroup"]: + """ + Returns the PlacementGroup object for the Instance. + + :returns: The Placement Group this instance is under. 
+ :rtype: Optional[PlacementGroup] + """ + # Workaround to avoid circular import + from linode_api4.objects.placement import ( # pylint: disable=import-outside-toplevel + PlacementGroup, + ) + + if not hasattr(self, "_placement_group"): + # Refresh the instance if necessary + if not self._populated: + self._api_get() + + pg_data = self._raw_json.get("placement_group", None) + + if pg_data is None: + return None + + setattr( + self, + "_placement_group", + PlacementGroup(self._client, pg_data.get("id"), json=pg_data), + ) + + return self._placement_group + + def _populate(self, json): + if json is not None: + # fixes ipv4 and ipv6 attribute of json to make base._populate work + if "ipv4" in json and "address" in json["ipv4"]: + json["ipv4"]["id"] = json["ipv4"]["address"] + if "ipv6" in json and isinstance(json["ipv6"], list): + for j in json["ipv6"]: + j["id"] = j["range"] + + Base._populate(self, json) + + def invalidate(self): + """Clear out cached properties""" + if hasattr(self, "_avail_backups"): + del self._avail_backups + + if hasattr(self, "_ips"): + del self._ips + + if hasattr(self, "_transfer"): + del self._transfer + + if hasattr(self, "_placement_group"): + del self._placement_group + + if hasattr(self, "_interfaces"): + del self._interfaces + + Base.invalidate(self) + + def boot(self, config=None): + """ + Boots a Linode you have permission to modify. If no parameters are given, a Config + profile will be chosen for this boot based on the following criteria: + + - If there is only one Config profile for this Linode, it will be used. + - If there is more than one Config profile, the last booted config will be used. + - If there is more than one Config profile and none were the last to be booted + (because the Linode was never booted or the last booted config was deleted) + an error will be returned. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-boot-linode-instance + + :param config: The Linode Config ID to boot into. 
+ :type: config: int + + :returns: True if the operation was successful. + :rtype: bool + """ + + resp = self._client.post( + "{}/boot".format(Instance.api_endpoint), + model=self, + data={"config_id": config.id} if config else None, + ) + + if "error" in resp: + return False + return True + + def shutdown(self): + """ + Shuts down a Linode you have permission to modify. If any actions + are currently running or queued, those actions must be completed + first before you can initiate a shutdown. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-shutdown-linode-instance + + :returns: True if the operation was successful. + :rtype: bool + """ + + resp = self._client.post( + "{}/shutdown".format(Instance.api_endpoint), model=self + ) + + if "error" in resp: + return False + return True + + def reboot(self): + """ + Reboots a Linode you have permission to modify. If any actions are currently running + or queued, those actions must be completed first before you can initiate a reboot. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-reboot-linode-instance + + :returns: True if the operation was successful. + :rtype: bool + """ + + resp = self._client.post( + "{}/reboot".format(Instance.api_endpoint), model=self + ) + + if "error" in resp: + return False + return True + + def resize( + self, + new_type, + allow_auto_disk_resize=True, + migration_type: MigrationType = MigrationType.COLD, + **kwargs, + ): + """ + Resizes a Linode you have the read_write permission to a different Type. If any + actions are currently running or queued, those actions must be completed first + before you can initiate a resize. Additionally, the following criteria must be + met in order to resize a Linode: + + - The Linode must not have a pending migration. + - Your Account cannot have an outstanding balance. + - The Linode must not have more disk allocation than the new Type allows. 
+ - In that situation, you must first delete or resize the disk to be smaller. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-resize-linode-instance + + :param new_type: The Linode Type or the id representing it. + :type: new_type: Type or int + + :param allow_auto_disk_resize: Automatically resize disks when resizing a Linode. + When resizing down to a smaller plan your Linodeโ€™s + data must fit within the smaller disk size. Defaults to true. + :type: allow_auto_disk_resize: bool + + :param migration_type: Type of migration to be used when resizing a Linode. + Customers can choose between warm and cold, the default type is cold. + :type: migration_type: str + + :returns: True if the operation was successful. + :rtype: bool + """ + + params = { + "type": new_type, + "allow_auto_disk_resize": allow_auto_disk_resize, + "migration_type": migration_type, + } + params.update(kwargs) + + resp = self._client.post( + "{}/resize".format(Instance.api_endpoint), + model=self, + data=_flatten_request_body_recursive(params), + ) + + if "error" in resp: + return False + return True + + @staticmethod + def generate_root_password(): + def _func(value): + if sys.version_info[0] < 3: + value = int(value.encode("hex"), 16) + return value + + password = "".join( + [ + PASSWORD_CHARS[_func(c) % len(PASSWORD_CHARS)] + for c in urandom(randint(50, 110)) + ] + ) + + # ensure the generated password is not too long + if len(password) > 110: + password = password[:110] + + return password + + # create derived objects + def config_create( + self, + kernel=None, + label=None, + devices=[], + disks=[], + volumes=[], + interfaces=[], + **kwargs, + ): + """ + Creates a Linode Config with the given attributes. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-add-linode-config + + :param kernel: The kernel to boot with. + :param label: The config label + :param disks: The list of disks, starting at sda, to map to this config. 
+ :param volumes: The volumes, starting after the last disk, to map to this + config + :param devices: A list of devices to assign to this config, in device + index order. Values must be of type Disk or Volume. If this is + given, you may not include disks or volumes. + :param **kwargs: Any other arguments accepted by the api. + + :returns: A new Linode Config + """ + # needed here to avoid circular imports + from .volume import Volume # pylint: disable=import-outside-toplevel + + hypervisor_prefix = "sd" if self.hypervisor == "kvm" else "xvd" + + device_limit = int( + max( + MIN_DEVICE_LIMIT, + min(self.specs.memory // MB_PER_GB, MAX_DEVICE_LIMIT), + ) + ) + + device_names = [ + hypervisor_prefix + suffix + for suffix in generate_device_suffixes(device_limit) + ] + + device_map = { + device_names[i]: None for i in range(0, len(device_names)) + } + + if devices and (disks or volumes): + raise ValueError( + 'You may not call config_create with "devices" and ' + 'either of "disks" or "volumes" specified!' 
+ ) + + if not devices: + if not isinstance(disks, list): + disks = [disks] + if not isinstance(volumes, list): + volumes = [volumes] + + devices = [] + + for d in disks: + if d is None: + devices.append(None) + elif isinstance(d, Disk): + devices.append(d) + else: + devices.append(Disk(self._client, int(d), self.id)) + + for v in volumes: + if v is None: + devices.append(None) + elif isinstance(v, Volume): + devices.append(v) + else: + devices.append(Volume(self._client, int(v))) + + if not devices: + raise ValueError("Must include at least one disk or volume!") + + for i, d in enumerate(devices): + if d is None: + pass + elif isinstance(d, Disk): + device_map[device_names[i]] = {"disk_id": d.id} + elif isinstance(d, Volume): + device_map[device_names[i]] = {"volume_id": d.id} + else: + raise TypeError("Disk or Volume expected!") + + param_interfaces = [] + for interface in interfaces: + if isinstance(interface, ConfigInterface): + interface = interface._serialize() + param_interfaces.append(interface) + + params = { + "kernel": kernel, + "label": ( + label + if label + else "{}_config_{}".format(self.label, len(self.configs)) + ), + "devices": device_map, + "interfaces": param_interfaces, + } + params.update(kwargs) + + result = self._client.post( + "{}/configs".format(Instance.api_endpoint), + model=self, + data=_flatten_request_body_recursive(params), + ) + self.invalidate() + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response creating config!", json=result + ) + + c = Config(self._client, result["id"], self.id, result) + return c + + def disk_create( + self, + size, + label=None, + filesystem=None, + read_only=False, + image=None, + root_pass=None, + authorized_keys=None, + authorized_users=None, + disk_encryption: Optional[ + Union[InstanceDiskEncryptionType, str] + ] = None, + stackscript=None, + **stackscript_args, + ): + """ + Creates a new Disk for this Instance. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-add-linode-disk + + :param size: The size of the disk, in MB + :param label: The label of the disk. If not given, a default label will be generated. + :param filesystem: The filesystem type for the disk. If not given, the default + for the image deployed the disk will be used. Required + if creating a disk without an image. + :param read_only: If True, creates a read-only disk + :param image: The Image to deploy to the disk. + :param root_pass: The password to configure for the root user when deploying an + image to this disk. Not used if image is not given. If an + image is given and root_pass is not, a password will be + generated and returned alongside the new disk. + :param authorized_keys: A list of SSH keys to install as trusted for the root user. + :param authorized_users: A list of usernames whose keys should be installed + as trusted for the root user. These user's keys + should already be set up, see :any:`ProfileGroup.ssh_keys` + for details. + :param disk_encryption: The disk encryption policy for this Linode. + NOTE: Disk encryption may not currently be available to all users. + :type disk_encryption: InstanceDiskEncryptionType or str + :param stackscript: A StackScript object, or the ID of one, to deploy to this + disk. Requires deploying a compatible image. + :param **stackscript_args: Any arguments to pass to the StackScript, as defined + by its User Defined Fields. 
+ """ + + gen_pass = None + if image and not root_pass: + gen_pass = Instance.generate_root_password() + root_pass = gen_pass + + authorized_keys = load_and_validate_keys(authorized_keys) + + if image and not label: + label = "My {} Disk".format(image.label) + + params = { + "size": size, + "label": ( + label + if label + else "{}_disk_{}".format(self.label, len(self.disks)) + ), + "read_only": read_only, + "filesystem": filesystem, + "authorized_keys": authorized_keys, + "authorized_users": authorized_users, + "stackscript_id": stackscript, + } + + if disk_encryption is not None: + params["disk_encryption"] = str(disk_encryption) + + if image: + params.update( + { + "image": image, + "root_pass": root_pass, + } + ) + + if stackscript_args: + params["stackscript_data"] = stackscript_args + + result = self._client.post( + "{}/disks".format(Instance.api_endpoint), + model=self, + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + self.invalidate() + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response creating disk!", json=result + ) + + d = Disk(self._client, result["id"], self.id, result) + + if gen_pass: + return d, gen_pass + return d + + def enable_backups(self): + """ + Enable Backups for this Instance. When enabled, we will automatically + backup your Instance's data so that it can be restored at a later date. + For more information on Instance's Backups service and pricing, see our + Backups Page: https://www.linode.com/backups + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-enable-backups + + :returns: True if the operation was successful. + :rtype: bool + """ + self._client.post( + "{}/backups/enable".format(Instance.api_endpoint), model=self + ) + self.invalidate() + return True + + def cancel_backups(self): + """ + Cancels Backups for this Instance. All existing Backups will be lost, + including any snapshots that have been taken. 
This cannot be undone, + but Backups can be re-enabled at a later date. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-cancel-backups + + :returns: True if the operation was successful. + :rtype: bool + """ + self._client.post( + "{}/backups/cancel".format(Instance.api_endpoint), model=self + ) + self.invalidate() + return True + + def snapshot(self, label=None): + """ + Creates a snapshot Backup of a Linode. + + Important: If you already have a snapshot of this Linode, this + is a destructive action. The previous snapshot will be deleted. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-snapshot + + :param label: The label for the new snapshot. + :type: label: str + + :returns: The snapshot Backup created. + :rtype: Backup + """ + + result = self._client.post( + "{}/backups".format(Instance.api_endpoint), + model=self, + data={"label": label}, + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response taking snapshot!", json=result + ) + + # so the changes show up the next time they're accessed + if hasattr(self, "_avail_backups"): + del self._avail_backups + + b = Backup(self._client, result["id"], self.id, result) + return b + + def ip_allocate(self, public=False): + """ + Allocates a new :any:`IPAddress` for this Instance. Additional public + IPs require justification, and you may need to open a :any:`SupportTicket` + before you can add one. You may only have, at most, one private IP per + Instance. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-add-linode-ip + + :param public: If the new IP should be public or private. Defaults to + private. 
+ :type public: bool + + :returns: The new IPAddress + :rtype: IPAddress + """ + result = self._client.post( + "{}/ips".format(Instance.api_endpoint), + model=self, + data={ + "type": "ipv4", + "public": public, + }, + ) + + if not "address" in result: + raise UnexpectedResponseError( + "Unexpected response allocating IP!", json=result + ) + + i = IPAddress(self._client, result["address"], result) + return i + + def rebuild( + self, + image, + root_pass=None, + authorized_keys=None, + disk_encryption: Optional[ + Union[InstanceDiskEncryptionType, str] + ] = None, + **kwargs, + ): + """ + Rebuilding an Instance deletes all existing Disks and Configs and deploys + a new :any:`Image` to it. This can be used to reset an existing + Instance or to install an Image on an empty Instance. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-rebuild-linode-instance + + :param image: The Image to deploy to this Instance + :type image: str or Image + :param root_pass: The root password for the newly rebuilt Instance. If + omitted, a password will be generated and returned. + :type root_pass: str + :param authorized_keys: The ssh public keys to install in the linode's + /root/.ssh/authorized_keys file. Each entry may + be a single key, or a path to a file containing + the key. + :type authorized_keys: list or str + :param disk_encryption: The disk encryption policy for this Linode. + NOTE: Disk encryption may not currently be available to all users. 
+ :type disk_encryption: InstanceDiskEncryptionType or str + + :returns: The newly generated password, if one was not provided + (otherwise True) + :rtype: str or bool + """ + ret_pass = None + if not root_pass: + ret_pass = Instance.generate_root_password() + root_pass = ret_pass + + authorized_keys = load_and_validate_keys(authorized_keys) + + params = { + "image": image, + "root_pass": root_pass, + "authorized_keys": authorized_keys, + "disk_encryption": ( + str(disk_encryption) if disk_encryption else None + ), + } + + params.update(kwargs) + + result = self._client.post( + "{}/rebuild".format(Instance.api_endpoint), + model=self, + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response issuing rebuild!", json=result + ) + + # update ourself with the newly-returned information + self._populate(result) + + if not ret_pass: + return True + else: + return ret_pass + + def rescue(self, *disks): + """ + Rescue Mode is a safe environment for performing many system recovery and disk management + tasks. Rescue Mode is based on the Finnix recovery distribution, a self-contained and bootable + Linux distribution. You can also use Rescue Mode for tasks other than disaster recovery, + such as formatting disks to use different filesystems, copying data between disks, and + downloading files from a disk via SSH and SFTP. + + Note that โ€œsdhโ€ is reserved and unavailable during rescue. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-rescue-linode-instance + + :param disks: Devices that are either Disks or Volumes + :type: disks: dict + + Example usage: + disks = { + "sda": { + "disk_id": 124458, + "volume_id": null + }, + "sdb": { + "disk_id": null, + "volume_id": null + } + } + """ + + if disks: + disks = { + x: {"disk_id": y} + for x, y in zip( + ("sda", "sdb", "sdc", "sdd", "sde", "sdf", "sdg"), disks + ) + } + else: + disks = None + + result = self._client.post( + "{}/rescue".format(Instance.api_endpoint), + model=self, + data={"devices": disks}, + ) + + return result + + def mutate(self, allow_auto_disk_resize=True): + """ + Upgrades this Instance to the latest generation type + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-mutate-linode-instance + + :param allow_auto_disk_resize: Automatically resize disks when resizing a Linode. + When resizing down to a smaller plan your Linodeโ€™s + data must fit within the smaller disk size. Defaults to true. + :type: allow_auto_disk_resize: bool + + :returns: True if the operation was successful. + :rtype: bool + """ + + params = {"allow_auto_disk_resize": allow_auto_disk_resize} + + self._client.post( + "{}/mutate".format(Instance.api_endpoint), model=self, data=params + ) + + return True + + def initiate_migration( + self, + region=None, + upgrade=None, + migration_type: MigrationType = MigrationType.COLD, + placement_group: Union[ + InstancePlacementGroupAssignment, Dict[str, Any], int + ] = None, + ): + """ + Initiates a pending migration that is already scheduled for this Linode + Instance + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-migrate-linode-instance + + :param region: The region to which the Linode will be migrated. Must be a valid region slug. + A list of regions can be viewed by using the GET /regions endpoint. 
A cross data + center migration will cancel a pending migration that has not yet been initiated. + A cross data center migration will initiate a linode_migrate_datacenter_create event. + :type: region: str + + :param upgrade: When initiating a cross DC migration, setting this value to true will also ensure + that the Linode is upgraded to the latest generation of hardware that corresponds to + your Linodeโ€™s Type, if any free upgrades are available for it. If no free upgrades + are available, and this value is set to true, then the endpoint will return a 400 + error code and the migration will not be performed. If the data center set in the + region field does not allow upgrades, then the endpoint will return a 400 error + code and the migration will not be performed. + :type: upgrade: bool + + :param migration_type: The type of migration that will be used for this Linode migration. + Customers can only use this param when activating a support-created migration. + Customers can choose between a cold and warm migration, cold is the default type. + :type: migration_type: str + + :param placement_group: Information about the placement group to create this instance under. + :type placement_group: Union[InstancePlacementGroupAssignment, Dict[str, Any], int] + """ + + params = { + "region": region, + "upgrade": upgrade, + "type": migration_type, + "placement_group": _expand_placement_group_assignment( + placement_group + ), + } + + self._client.post( + "{}/migrate".format(Instance.api_endpoint), + model=self, + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + def firewalls(self): + """ + View Firewall information for Firewalls associated with this Linode. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-firewalls + + :returns: A List of Firewalls of the Linode Instance. 
+ :rtype: List[Firewall] + """ + + result = self._client.get( + "{}/firewalls".format(Instance.api_endpoint), model=self + ) + + return [ + Firewall(self._client, firewall["id"]) + for firewall in result["data"] + ] + + def apply_firewalls(self): + """ + Reapply assigned firewalls to a Linode in case they were not applied successfully. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-apply-firewalls + + :returns: Returns True if the operation was successful + :rtype: bool + """ + + self._client.post( + "{}/firewalls/apply".format(Instance.api_endpoint), model=self + ) + + return True + + def nodebalancers(self): + """ + View a list of NodeBalancers that are assigned to this Linode and readable by the requesting User. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-node-balancers + + :returns: A List of Nodebalancers of the Linode Instance. + :rtype: List[Nodebalancer] + """ + + result = self._client.get( + "{}/nodebalancers".format(Instance.api_endpoint), model=self + ) + + return [ + NodeBalancer(self._client, nodebalancer["id"]) + for nodebalancer in result["data"] + ] + + def volumes(self): + """ + View Block Storage Volumes attached to this Linode. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-volumes + + :returns: A List of Volumes of the Linode Instance. 
+ :rtype: List[Volume] + """ + from linode_api4.objects import ( # pylint: disable=import-outside-toplevel + Volume, + ) + + result = self._client.get( + "{}/volumes".format(Instance.api_endpoint), model=self + ) + + return [Volume(self._client, volume["id"]) for volume in result["data"]] + + def clone( + self, + to_linode=None, + region=None, + instance_type=None, + configs=[], + disks=[], + label=None, + group=None, + with_backups=None, + placement_group: Union[ + InstancePlacementGroupAssignment, + "PlacementGroup", + Dict[str, Any], + int, + ] = None, + ): + """ + Clones this linode into a new linode or into a new linode in the given region + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-clone-linode-instance + + :param to_linode: If an existing Linode is the target for the clone, the ID of that + Linode. The existing Linode must have enough resources to accept the clone. + :type: to_linode: int + + :param region: This is the Region where the Linode will be deployed. Region can only be + provided and is required when cloning to a new Linode. + :type: region: str + + :param instance_type: A Linodeโ€™s Type determines what resources are available to it, including disk space, + memory, and virtual cpus. The amounts available to a specific Linode are + returned as specs on the Linode object. + :type: instance_type: str + + :param configs: An array of configuration profile IDs. + :type: configs: List of int + + :param disks: An array of disk IDs. + :type: disks: List of int + + :param label: The label to assign this Linode when cloning to a new Linode. + :type: label: str + + :param group: A label used to group Linodes for display. Linodes are not required to have a group. + :type: group: str + + :param with_backups: If this field is set to true, the created Linode will automatically be + enrolled in the Linode Backup service. This will incur an additional charge. 
+ :type: with_backups: bool + + :param placement_group: Information about the placement group to create this instance under. + :type placement_group: Union[InstancePlacementGroupAssignment, PlacementGroup, Dict[str, Any], int] + + :returns: The cloned Instance. + :rtype: Instance + """ + if to_linode and region: + raise ValueError( + 'You may only specify one of "to_linode" and "region"' + ) + + if region and not type: + raise ValueError('Specifying a region requires a "service" as well') + + if not isinstance(configs, list) and not isinstance( + configs, PaginatedList + ): + configs = [configs] + if not isinstance(disks, list) and not isinstance(disks, PaginatedList): + disks = [disks] + + params = { + "linode_id": to_linode, + "region": region, + "type": instance_type, + "configs": configs, + "disks": disks, + "label": label, + "group": group, + "with_backups": with_backups, + "placement_group": _expand_placement_group_assignment( + placement_group + ), + } + + result = self._client.post( + "{}/clone".format(Instance.api_endpoint), + model=self, + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response cloning Instance!", json=result + ) + + l = Instance(self._client, result["id"], result) + return l + + @property + def stats(self): + """ + Returns the JSON stats for this Instance + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-stats + + :returns: The JSON stats for this Instance + :rtype: dict + """ + # TODO - this would be nicer if we formatted the stats + return self._client.get( + "{}/stats".format(Instance.api_endpoint), model=self + ) + + @property + def lke_cluster(self) -> Optional["LKECluster"]: + """ + Returns the LKE Cluster this Instance is a node of. + + :returns: The LKE Cluster this Instance is a node of. 
+ :rtype: Optional[LKECluster] + """ + + # Local import to prevent circular dependency + from linode_api4.objects.lke import ( # pylint: disable=import-outside-toplevel + LKECluster, + ) + + return LKECluster(self._client, self.lke_cluster_id) + + def stats_for(self, dt): + """ + Returns stats for the month containing the given datetime + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-stats-by-year-month + + :param dt: A Datetime for which to return statistics + :type: dt: Datetime + + :returns: The JSON stats for this Instance at the specified Datetime + :rtype: dict + """ + # TODO - this would be nicer if we formatted the stats + if not isinstance(dt, datetime): + raise TypeError("stats_for requires a datetime object!") + return self._client.get( + "{}/stats/{}".format( + Instance.api_endpoint, parse.quote(dt.strftime("%Y/%m")) + ), + model=self, + ) + + def interface_create( + self, + firewall: Optional[Union[Firewall, int]] = None, + default_route: Optional[ + Union[Dict[str, Any], LinodeInterfaceDefaultRouteOptions] + ] = None, + public: Optional[ + Union[Dict[str, Any], LinodeInterfacePublicOptions] + ] = None, + vlan: Optional[ + Union[Dict[str, Any], LinodeInterfaceVLANOptions] + ] = None, + vpc: Optional[Union[Dict[str, Any], LinodeInterfaceVPCOptions]] = None, + **kwargs, + ) -> LinodeInterface: + """ + Creates a new interface under this Linode. + Linode interfaces are not interchangeable with Config interfaces. + + NOTE: Linode interfaces may not currently be available to all users. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-linode-interface + + Example: Creating a simple public interface for this Linode:: + + interface = instance.interface_create( + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True, + ipv6=True + ), + public=LinodeInterfacePublicOptions() + ) + + Example: Creating a simple VPC interface for this Linode:: + + interface = instance.interface_create( + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True + ), + vpc=LinodeInterfaceVPCOptions( + subnet_id=12345 + ) + ) + + Example: Creating a simple VLAN interface for this Linode:: + + interface = instance.interface_create( + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True + ), + vlan=LinodeInterfaceVLANOptions( + vlan_label="my-vlan" + ) + ) + + :param firewall: The firewall this interface should be assigned to. + :param default_route: The desired default route configuration of the new interface. + :param public: The public-specific configuration of the new interface. + If set, the new instance will be a public interface. + :param vlan: The VLAN-specific configuration of the new interface. + If set, the new instance will be a VLAN interface. + :param vpc: The VPC-specific configuration of the new interface. + If set, the new instance will be a VPC interface. + + :returns: The newly created Linode Interface. 
+ :rtype: LinodeInterface + """ + + params = { + "firewall_id": firewall, + "default_route": default_route, + "public": public, + "vlan": vlan, + "vpc": vpc, + } + + params.update(kwargs) + + result = self._client.post( + "{}/interfaces".format(Instance.api_endpoint), + model=self, + data=drop_null_keys(_flatten_request_body_recursive(params)), + ) + + if "id" not in result: + raise UnexpectedResponseError( + "Unexpected response creating interface!", json=result + ) + + return LinodeInterface(self._client, result["id"], self.id, json=result) + + @property + def interfaces_settings(self) -> LinodeInterfacesSettings: + """ + The settings for all interfaces under this Linode. + + NOTE: Linode interfaces may not currently be available to all users. + + :returns: The settings for instance-level interface settings for this Linode. + :rtype: LinodeInterfacesSettings + """ + + # NOTE: We do not implement this as a Property because Property does + # not currently have a mechanism for 1:1 sub-entities. + + if not hasattr(self, "_interfaces_settings"): + self._set( + "_interfaces_settings", + # We don't use lazy loading here because it can trigger a known issue + # where setting fields for updates before the entity has been lazy loaded + # causes the user's value to be discarded. + self._client.load(LinodeInterfacesSettings, self.id), + ) + + return self._interfaces_settings + + @property + def linode_interfaces(self) -> Optional[list[LinodeInterface]]: + """ + All interfaces for this Linode. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-interface + + :returns: An ordered list of linode interfaces under this Linode. If the linode is with legacy config interfaces, returns None. 
+ :rtype: Optional[list[LinodeInterface]] + """ + + if self.interface_generation != InterfaceGeneration.LINODE: + return None + + if not hasattr(self, "_interfaces"): + result = self._client.get( + "{}/interfaces".format(Instance.api_endpoint), + model=self, + ) + if "interfaces" not in result: + raise UnexpectedResponseError( + "Got unexpected response when retrieving Linode interfaces", + json=result, + ) + + self._set( + "_interfaces", + [ + LinodeInterface( + self._client, iface["id"], self.id, json=iface + ) + for iface in result["interfaces"] + ], + ) + + return self._interfaces + + def upgrade_interfaces( + self, + config: Optional[Union[Config, int]] = None, + dry_run: bool = False, + **kwargs, + ) -> UpgradeInterfacesResult: + """ + Automatically upgrades all legacy config interfaces of a + single configuration profile to Linode interfaces. + + NOTE: If dry_run is True, interfaces in the result will be + of type MappedObject rather than LinodeInterface. + + NOTE: Linode interfaces may not currently be available to all users. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-upgrade-linode-interfaces + + :param config: The configuration profile the legacy interfaces to + upgrade are under. + :type config: Config or int + :param dry_run: Whether this operation should be a dry run, + which will return the interfaces that would be + created if the operation were completed. + :type dry_run: bool + + :returns: Information about the newly upgraded interfaces. + :rtype: UpgradeInterfacesResult + """ + params = {"config_id": config, "dry_run": dry_run} + + params.update(kwargs) + + result = self._client.post( + "{}/upgrade-interfaces".format(Instance.api_endpoint), + model=self, + data=_flatten_request_body_recursive(drop_null_keys(params)), + ) + + # This resolves an edge case where `result["interfaces"]` persists across + # multiple calls, which can cause parsing errors when expanding them below. 
+ result = copy.deepcopy(result) + + self.invalidate() + + # We don't convert interface dicts to LinodeInterface objects on dry runs + # actual API entities aren't created. + if dry_run: + result["interfaces"] = [ + MappedObject(**iface) for iface in result["interfaces"] + ] + else: + result["interfaces"] = [ + LinodeInterface(self._client, iface["id"], self.id, iface) + for iface in result["interfaces"] + ] + + return UpgradeInterfacesResult.from_json(result) + + +class UserDefinedFieldType(Enum): + text = 1 + select_one = 2 + select_many = 3 + + +class UserDefinedField: + def __init__(self, name, label, example, field_type, choices=None): + self.name = name + self.label = label + self.example = example + self.field_type = field_type + self.choices = choices + + def __repr__(self): + return "{}({}): {}".format( + self.label, self.field_type.name, self.example + ) + + +class StackScript(Base): + """ + A script allowing users to reproduce specific software configurations + when deploying Compute Instances, with more user control than static system images. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-stack-script + """ + + api_endpoint = "/linode/stackscripts/{id}" + properties = { + "user_defined_fields": Property(), + "label": Property(mutable=True), + "rev_note": Property(mutable=True), + "username": Property(), + "user_gravatar_id": Property(), + "is_public": Property(mutable=True), + "created": Property(is_datetime=True), + "deployments_active": Property(), + "script": Property(mutable=True), + "images": Property( + mutable=True, unordered=True + ), # TODO make slug_relationship + "deployments_total": Property(), + "description": Property(mutable=True), + "updated": Property(is_datetime=True), + "mine": Property(), + } + + def _populate(self, json): + """ + Override the populate method to map user_defined_fields to + fancy values + """ + Base._populate(self, json) + + mapped_udfs = [] + for udf in self.user_defined_fields: + t = UserDefinedFieldType.text + choices = None + if hasattr(udf, "oneof"): + t = UserDefinedFieldType.select_one + choices = udf.oneof.split(",") + elif hasattr(udf, "manyof"): + t = UserDefinedFieldType.select_many + choices = udf.manyof.split(",") + + mapped_udfs.append( + UserDefinedField( + udf.name, + udf.label if hasattr(udf, "label") else None, + udf.example if hasattr(udf, "example") else None, + t, + choices=choices, + ) + ) + + self._set("user_defined_fields", mapped_udfs) + ndist = [Image(self._client, d) for d in self.images] + self._set("images", ndist) + + def _serialize(self, is_put: bool = False): + dct = Base._serialize(self, is_put=is_put) + dct["images"] = [d.id for d in self.images] + return dct + + +def _expand_placement_group_assignment( + pg: Union[ + InstancePlacementGroupAssignment, "PlacementGroup", Dict[str, Any], int + ], +) -> Optional[Dict[str, Any]]: + """ + Expands the placement group argument into a dict for use in an API request body. + + :param pg: The placement group argument to be expanded. 
+ :type pg: Union[InstancePlacementGroupAssignment, PlacementGroup, Dict[str, Any], int] + + :returns: The expanded placement group. + :rtype: Optional[Dict[str, Any]] + """ + # Workaround to avoid circular import + from linode_api4.objects.placement import ( # pylint: disable=import-outside-toplevel + PlacementGroup, + ) + + if pg is None: + return None + + if isinstance(pg, dict): + return pg + + if isinstance(pg, InstancePlacementGroupAssignment): + return pg.dict + + if isinstance(pg, PlacementGroup): + return {"id": pg.id} + + if isinstance(pg, int): + return {"id": pg} + + raise TypeError(f"Invalid type for Placement Group: {type(pg)}") diff --git a/linode_api4/objects/linode_interfaces.py b/linode_api4/objects/linode_interfaces.py new file mode 100644 index 000000000..0598d1f3c --- /dev/null +++ b/linode_api4/objects/linode_interfaces.py @@ -0,0 +1,552 @@ +from dataclasses import dataclass, field +from typing import List, Optional + +from linode_api4.objects.base import Base, Property +from linode_api4.objects.dbase import DerivedBase +from linode_api4.objects.networking import Firewall +from linode_api4.objects.serializable import JSONObject + + +@dataclass +class LinodeInterfacesSettingsDefaultRouteOptions(JSONObject): + """ + The options used to configure the default route settings for a Linode's network interfaces. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + ipv4_interface_id: Optional[int] = None + ipv6_interface_id: Optional[int] = None + + +@dataclass +class LinodeInterfacesSettingsDefaultRoute(JSONObject): + """ + The default route settings for a Linode's network interfaces. + + NOTE: Linode interfaces may not currently be available to all users. 
+ """ + + put_class = LinodeInterfacesSettingsDefaultRouteOptions + + ipv4_interface_id: Optional[int] = None + ipv4_eligible_interface_ids: List[int] = field(default_factory=list) + ipv6_interface_id: Optional[int] = None + ipv6_eligible_interface_ids: List[int] = field(default_factory=list) + + +class LinodeInterfacesSettings(Base): + """ + The settings related to a Linode's network interfaces. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-interface-settings + + NOTE: Linode interfaces may not currently be available to all users. + """ + + api_endpoint = "/linode/instances/{id}/interfaces/settings" + + properties = { + "id": Property(identifier=True), + "network_helper": Property(mutable=True), + "default_route": Property( + mutable=True, json_object=LinodeInterfacesSettingsDefaultRoute + ), + } + + +# Interface POST Options +@dataclass +class LinodeInterfaceDefaultRouteOptions(JSONObject): + """ + Options accepted when creating or updating a Linode Interface's default route settings. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + ipv4: Optional[bool] = None + ipv6: Optional[bool] = None + + +@dataclass +class LinodeInterfaceVPCIPv4AddressOptions(JSONObject): + """ + Options accepted for a single address when creating or updating the IPv4 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + address: Optional[str] = None + primary: Optional[bool] = None + nat_1_1_address: Optional[str] = None + + +@dataclass +class LinodeInterfaceVPCIPv4RangeOptions(JSONObject): + """ + Options accepted for a single range when creating or updating the IPv4 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. 
+ """ + + range: str = "" + + +@dataclass +class LinodeInterfaceVPCIPv4Options(JSONObject): + """ + Options accepted when creating or updating the IPv4 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + addresses: Optional[List[LinodeInterfaceVPCIPv4AddressOptions]] = None + ranges: Optional[List[LinodeInterfaceVPCIPv4RangeOptions]] = None + + +@dataclass +class LinodeInterfaceVPCIPv6SLAACOptions(JSONObject): + """ + Options accepted for a single SLAAC when creating or updating the IPv6 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + range: Optional[str] = None + + +@dataclass +class LinodeInterfaceVPCIPv6RangeOptions(JSONObject): + """ + Options accepted for a single range when creating or updating the IPv6 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + range: Optional[str] = None + + +@dataclass +class LinodeInterfaceVPCIPv6Options(JSONObject): + """ + Options accepted when creating or updating the IPv6 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + is_public: Optional[bool] = None + slaac: Optional[List[LinodeInterfaceVPCIPv6SLAACOptions]] = None + ranges: Optional[List[LinodeInterfaceVPCIPv6RangeOptions]] = None + + +@dataclass +class LinodeInterfaceVPCOptions(JSONObject): + """ + VPC-exclusive options accepted when creating or updating a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + subnet_id: int = 0 + ipv4: Optional[LinodeInterfaceVPCIPv4Options] = None + ipv6: Optional[LinodeInterfaceVPCIPv6Options] = None + + +@dataclass +class LinodeInterfacePublicIPv4AddressOptions(JSONObject): + """ + Options accepted for a single address when creating or updating the IPv4 configuration of a public Linode Interface. 
+ + NOTE: Linode interfaces may not currently be available to all users. + """ + + address: str = "" + primary: Optional[bool] = None + + +@dataclass +class LinodeInterfacePublicIPv4Options(JSONObject): + """ + Options accepted when creating or updating the IPv4 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + addresses: Optional[List[LinodeInterfacePublicIPv4AddressOptions]] = None + + +@dataclass +class LinodeInterfacePublicIPv6RangeOptions(JSONObject): + """ + Options accepted for a single range when creating or updating the IPv6 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + range: str = "" + + +@dataclass +class LinodeInterfacePublicIPv6Options(JSONObject): + """ + Options accepted when creating or updating the IPv6 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + ranges: Optional[List[LinodeInterfacePublicIPv6RangeOptions]] = None + + +@dataclass +class LinodeInterfacePublicOptions(JSONObject): + """ + Public-exclusive options accepted when creating or updating a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + ipv4: Optional[LinodeInterfacePublicIPv4Options] = None + ipv6: Optional[LinodeInterfacePublicIPv6Options] = None + + +@dataclass +class LinodeInterfaceVLANOptions(JSONObject): + """ + VLAN-exclusive options accepted when creating or updating a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + vlan_label: str = "" + ipam_address: Optional[str] = None + + +@dataclass +class LinodeInterfaceOptions(JSONObject): + """ + Options accepted when creating or updating a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. 
+ """ + + always_include = { + # If a default firewall_id isn't configured, the API requires that + # firewall_id is defined in the LinodeInterface POST body. + "firewall_id" + } + + firewall_id: Optional[int] = None + default_route: Optional[LinodeInterfaceDefaultRouteOptions] = None + vpc: Optional[LinodeInterfaceVPCOptions] = None + public: Optional[LinodeInterfacePublicOptions] = None + vlan: Optional[LinodeInterfaceVLANOptions] = None + + +# Interface GET Response + + +@dataclass +class LinodeInterfaceDefaultRoute(JSONObject): + """ + The default route configuration of a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfaceDefaultRouteOptions + + ipv4: bool = False + ipv6: bool = False + + +@dataclass +class LinodeInterfaceVPCIPv4Address(JSONObject): + """ + A single address under the IPv4 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfaceVPCIPv4AddressOptions + + address: str = "" + primary: bool = False + nat_1_1_address: Optional[str] = None + + +@dataclass +class LinodeInterfaceVPCIPv4Range(JSONObject): + """ + A single range under the IPv4 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfaceVPCIPv4RangeOptions + + range: str = "" + + +@dataclass +class LinodeInterfaceVPCIPv4(JSONObject): + """ + A single address under the IPv4 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. 
+ """ + + put_class = LinodeInterfaceVPCIPv4Options + + addresses: List[LinodeInterfaceVPCIPv4Address] = field(default_factory=list) + ranges: List[LinodeInterfaceVPCIPv4Range] = field(default_factory=list) + + +@dataclass +class LinodeInterfaceVPCIPv6SLAAC(JSONObject): + """ + A single SLAAC entry under the IPv6 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + range: str = "" + address: str = "" + + +@dataclass +class LinodeInterfaceVPCIPv6Range(JSONObject): + """ + A single range under the IPv6 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + range: str = "" + + +@dataclass +class LinodeInterfaceVPCIPv6(JSONObject): + """ + A single address under the IPv6 configuration of a VPC Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfaceVPCIPv6Options + + is_public: bool = False + slaac: List[LinodeInterfaceVPCIPv6SLAAC] = field(default_factory=list) + ranges: List[LinodeInterfaceVPCIPv6Range] = field(default_factory=list) + + +@dataclass +class LinodeInterfaceVPC(JSONObject): + """ + VPC-specific configuration field for a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfaceVPCOptions + + vpc_id: int = 0 + subnet_id: int = 0 + + ipv4: Optional[LinodeInterfaceVPCIPv4] = None + ipv6: Optional[LinodeInterfaceVPCIPv6] = None + + +@dataclass +class LinodeInterfacePublicIPv4Address(JSONObject): + """ + A single address under the IPv4 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. 
+ """ + + put_class = LinodeInterfacePublicIPv4AddressOptions + + address: str = "" + primary: bool = False + + +@dataclass +class LinodeInterfacePublicIPv4Shared(JSONObject): + """ + A single shared address under the IPv4 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + address: str = "" + linode_id: int = 0 + + +@dataclass +class LinodeInterfacePublicIPv4(JSONObject): + """ + The IPv4 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfacePublicIPv4Options + + addresses: List[LinodeInterfacePublicIPv4Address] = field( + default_factory=list + ) + shared: List[LinodeInterfacePublicIPv4Shared] = field(default_factory=list) + + +@dataclass +class LinodeInterfacePublicIPv6SLAAC(JSONObject): + """ + A single SLAAC entry under the IPv6 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + address: str = "" + prefix: int = 0 + + +@dataclass +class LinodeInterfacePublicIPv6Shared(JSONObject): + """ + A single shared range under the IPv6 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + range: str = "" + route_target: Optional[str] = None + + +@dataclass +class LinodeInterfacePublicIPv6Range(JSONObject): + """ + A single range under the IPv6 configuration of a public Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfacePublicIPv6RangeOptions + + range: str = "" + route_target: Optional[str] = None + + +@dataclass +class LinodeInterfacePublicIPv6(JSONObject): + """ + The IPv6 configuration of a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. 
+ """ + + put_class = LinodeInterfacePublicIPv6Options + + slaac: List[LinodeInterfacePublicIPv6SLAAC] = field(default_factory=list) + shared: List[LinodeInterfacePublicIPv6Shared] = field(default_factory=list) + ranges: List[LinodeInterfacePublicIPv6Range] = field(default_factory=list) + + +@dataclass +class LinodeInterfacePublic(JSONObject): + """ + Public-specific configuration fields for a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfacePublicOptions + + ipv4: Optional[LinodeInterfacePublicIPv4] = None + ipv6: Optional[LinodeInterfacePublicIPv6] = None + + +@dataclass +class LinodeInterfaceVLAN(JSONObject): + """ + VLAN-specific configuration fields for a Linode Interface. + + NOTE: Linode interfaces may not currently be available to all users. + """ + + put_class = LinodeInterfaceVLANOptions + + vlan_label: str = "" + ipam_address: Optional[str] = None + + +class LinodeInterface(DerivedBase): + """ + A Linode's network interface. + + NOTE: Linode interfaces may not currently be available to all users. 
+ + NOTE: When using the ``save()`` method, certain local fields with computed values will + not be refreshed on the local object until after ``invalidate()`` has been called:: + + # Automatically assign an IPv4 address from the associated VPC Subnet + interface.vpc.ipv4.addresses[0].address = "auto" + + # Save the interface + interface.save() + + # Invalidate the interface + interface.invalidate() + + # Access the new address + print(interface.vpc.ipv4.addresses[0].address) + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-interface + """ + + api_endpoint = "/linode/instances/{linode_id}/interfaces/{id}" + derived_url_path = "interfaces" + parent_id_name = "linode_id" + + properties = { + "linode_id": Property(identifier=True), + "id": Property(identifier=True), + "mac_address": Property(), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "version": Property(), + "default_route": Property( + mutable=True, + json_object=LinodeInterfaceDefaultRoute, + ), + "public": Property(mutable=True, json_object=LinodeInterfacePublic), + "vlan": Property(mutable=True, json_object=LinodeInterfaceVLAN), + "vpc": Property(mutable=True, json_object=LinodeInterfaceVPC), + } + + def firewalls(self, *filters) -> List[Firewall]: + """ + Retrieves a list of Firewalls for this Linode Interface. + Linode interfaces are not interchangeable with Config interfaces. + + NOTE: Linode interfaces may not currently be available to all users. + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A List of Firewalls for this Linode Interface. + :rtype: List[Firewall] + + NOTE: Caching is disabled on this method and each call will make + an additional Linode API request. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-linode-interface-firewalls + """ + + return self._client._get_and_filter( + Firewall, + *filters, + endpoint="{}/firewalls".format(LinodeInterface.api_endpoint).format( + **vars(self) + ), + ) diff --git a/linode_api4/objects/lke.py b/linode_api4/objects/lke.py new file mode 100644 index 000000000..aa506a606 --- /dev/null +++ b/linode_api4/objects/lke.py @@ -0,0 +1,646 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Union +from urllib import parse + +from linode_api4.common import Price, RegionPrice +from linode_api4.errors import UnexpectedResponseError +from linode_api4.objects import ( + Base, + DerivedBase, + Instance, + InstanceDiskEncryptionType, + JSONObject, + MappedObject, + Property, + Region, + Type, +) +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.util import drop_null_keys + + +class LKEType(Base): + """ + An LKEType represents the structure of a valid LKE type. + Currently the LKEType can only be retrieved by listing, i.e.: + types = client.lke.types() + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-types + """ + + properties = { + "id": Property(identifier=True), + "label": Property(), + "price": Property(json_object=Price), + "region_prices": Property(json_object=RegionPrice), + "transfer": Property(), + } + + +class KubeVersion(Base): + """ + A KubeVersion is a version of Kubernetes that can be deployed on LKE. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-version + """ + + api_endpoint = "/lke/versions/{id}" + + properties = { + "id": Property(identifier=True), + } + + +class TieredKubeVersion(DerivedBase): + """ + A TieredKubeVersion is a version of Kubernetes that is specific to a certain LKE tier. + + NOTE: LKE tiers may not currently be available to all users. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-version + """ + + api_endpoint = "/lke/tiers/{tier}/versions/{id}" + parent_id_name = "tier" + id_attribute = "id" + derived_url_path = "versions" + + properties = { + "id": Property(identifier=True), + "tier": Property(identifier=True), + } + + +@dataclass +class LKENodePoolTaint(JSONObject): + """ + LKENodePoolTaint represents the structure of a single taint that can be + applied to a node pool. + """ + + include_none_values = True + + key: Optional[str] = None + value: Optional[str] = None + effect: Optional[str] = None + + +@dataclass +class LKEClusterControlPlaneACLAddressesOptions(JSONObject): + """ + LKEClusterControlPlaneACLAddressesOptions are options used to configure + IP ranges that are explicitly allowed to access an LKE cluster's control plane. + """ + + ipv4: Optional[List[str]] = None + + ipv6: Optional[List[str]] = None + + +@dataclass +class LKEClusterControlPlaneACLOptions(JSONObject): + """ + LKEClusterControlPlaneACLOptions is used to set + the ACL configuration of an LKE cluster's control plane. + """ + + enabled: Optional[bool] = None + addresses: Optional[LKEClusterControlPlaneACLAddressesOptions] = None + + +@dataclass +class LKEClusterControlPlaneOptions(JSONObject): + """ + LKEClusterControlPlaneOptions is used to configure + the control plane of an LKE cluster during its creation. + """ + + high_availability: Optional[bool] = None + acl: Optional[LKEClusterControlPlaneACLOptions] = None + + +@dataclass +class LKEClusterControlPlaneACLAddresses(JSONObject): + """ + LKEClusterControlPlaneACLAddresses describes IP ranges that are explicitly allowed + to access an LKE cluster's control plane. + """ + + include_none_values = True + + ipv4: Optional[List[str]] = None + ipv6: Optional[List[str]] = None + + +@dataclass +class LKEClusterControlPlaneACL(JSONObject): + """ + LKEClusterControlPlaneACL describes the ACL configuration of an LKE cluster's + control plane. 
+ """ + + include_none_values = True + + enabled: bool = False + addresses: Optional[LKEClusterControlPlaneACLAddresses] = None + + +class LKENodePoolNode: + """ + AN LKE Node Pool Node is a helper class that is used to populate the "nodes" + array of an LKE Node Pool, and set up an automatic relationship with the + Linode Instance the Node represented. + """ + + def __init__(self, client, json): + """ + Creates this NodePoolNode + """ + #: The ID of this Node Pool Node + self.id = json.get( + "id" + ) # why do these have an ID if they don't have an endpoint of their own? + + #: The ID of the Linode Instance this Node represents + self.instance_id = json.get("instance_id") + + #: The Instance object backing this Node Pool Node + self.instance = Instance(client, self.instance_id) + + #: The Status of this Node Pool Node + self.status = json.get("status") + + +class LKENodePool(DerivedBase): + """ + An LKE Node Pool describes a pool of Linode Instances that exist within an + LKE Cluster. + + NOTE: The k8s_version and update_strategy fields are only available for LKE Enterprise clusters. 
+
+    API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-node-pool
+    """
+
+    api_endpoint = "/lke/clusters/{cluster_id}/pools/{id}"
+    derived_url_path = "pools"
+    parent_id_name = "cluster_id"
+
+    properties = {
+        "id": Property(identifier=True),
+        "cluster_id": Property(identifier=True),
+        "label": Property(mutable=True),
+        "type": Property(slug_relationship=Type),
+        "disks": Property(),
+        "disk_encryption": Property(),
+        "count": Property(mutable=True),
+        "nodes": Property(
+            volatile=True
+        ),  # this is formatted in _populate below
+        "autoscaler": Property(mutable=True),
+        "tags": Property(mutable=True, unordered=True),
+        "labels": Property(mutable=True),
+        "taints": Property(mutable=True),
+        # Enterprise-specific properties
+        # Ideally we would use slug_relationship=TieredKubeVersion here, but
+        # it isn't possible without an extra request because the tier is not
+        # directly exposed in the node pool response.
+        "k8s_version": Property(mutable=True),
+        "update_strategy": Property(mutable=True),
+        "firewall_id": Property(mutable=True),
+    }
+
+    def _parse_raw_node(
+        self, raw_node: Union[LKENodePoolNode, dict, str]
+    ) -> LKENodePoolNode:
+        """
+        Builds a single LKENodePoolNode from a raw node entry (an existing object, a node dict, or a node ID string).
+ """ + if isinstance(raw_node, LKENodePoolNode): + return raw_node + + if isinstance(raw_node, dict): + node_id = raw_node.get("id") + if node_id is None: + raise ValueError("Node dictionary does not contain 'id' key") + + return LKENodePoolNode(self._client, raw_node) + + if isinstance(raw_node, str): + return self._client.load( + LKENodePoolNode, target_id=raw_node, target_parent_id=self.id + ) + + raise TypeError("Unsupported node type: {}".format(type(raw_node))) + + def _populate(self, json): + """ + Parse Nodes into more useful LKENodePoolNode objects + """ + + if json is not None and json != {}: + json["nodes"] = [ + self._parse_raw_node(node) for node in json.get("nodes", []) + ] + + json["taints"] = [ + ( + LKENodePoolTaint.from_json(taint) + if not isinstance(taint, LKENodePoolTaint) + else taint + ) + for taint in json.get("taints", []) + ] + + super()._populate(json) + + def recycle(self): + """ + Deleted and recreates all Linodes in this Node Pool in a rolling fashion. + Completing this operation may take several minutes. This operation will + cause all local data on Linode Instances in this pool to be lost. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-lke-cluster-pool-recycle + """ + self._client.post( + "{}/recycle".format(LKENodePool.api_endpoint), model=self + ) + self.invalidate() + + +class LKECluster(Base): + """ + An LKE Cluster is a single k8s cluster deployed via Linode Kubernetes Engine. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-cluster + """ + + api_endpoint = "/lke/clusters/{id}" + + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "label": Property(mutable=True), + "tags": Property(mutable=True, unordered=True), + "updated": Property(is_datetime=True), + "region": Property(slug_relationship=Region), + "k8s_version": Property(slug_relationship=KubeVersion, mutable=True), + "pools": Property(derived_class=LKENodePool), + "control_plane": Property(mutable=True), + "apl_enabled": Property(), + "tier": Property(), + } + + def invalidate(self): + """ + Extends the default invalidation logic to drop cached properties. + """ + if hasattr(self, "_api_endpoints"): + del self._api_endpoints + + if hasattr(self, "_kubeconfig"): + del self._kubeconfig + + if hasattr(self, "_control_plane_acl"): + del self._control_plane_acl + + Base.invalidate(self) + + @property + def api_endpoints(self): + """ + A list of API Endpoints for this Cluster. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-cluster-api-endpoints + + :returns: A list of MappedObjects of the API Endpoints + :rtype: List[MappedObject] + """ + # This result appears to be a PaginatedList, but objects in the list don't + # have IDs and can't be retrieved on their own, and it doesn't accept normal + # pagination properties, so we're converting this to a list of strings. + if not hasattr(self, "_api_endpoints"): + results = self._client.get( + "{}/api-endpoints".format(LKECluster.api_endpoint), model=self + ) + + self._api_endpoints = [MappedObject(**c) for c in results["data"]] + + return self._api_endpoints + + @property + def kubeconfig(self): + """ + The administrative Kubernetes Config used to access this cluster, encoded + in base64. Note that this config contains sensitive credentials to your + cluster. 
+
+        To convert this config into a readable form, use python's `base64` module::
+
+            import base64
+
+            config = my_cluster.kubeconfig
+            yaml_config = base64.b64decode(config)
+
+            # write this config out to disk
+            with open("/path/to/target/kubeconfig.yaml", "w") as f:
+                f.write(yaml_config.decode())
+
+        It may take a few minutes for a config to be ready when creating a new
+        cluster; during that time this request may fail.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-cluster-kubeconfig
+
+        :returns: The Kubeconfig file for this Cluster.
+        :rtype: str
+        """
+        if not hasattr(self, "_kubeconfig"):
+            result = self._client.get(
+                "{}/kubeconfig".format(LKECluster.api_endpoint), model=self
+            )
+
+            self._kubeconfig = result["kubeconfig"]
+
+        return self._kubeconfig
+
+    @property
+    def control_plane_acl(self) -> LKEClusterControlPlaneACL:
+        """
+        Gets the ACL configuration of this cluster's control plane.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-cluster-acl
+
+        :returns: The cluster's control plane ACL configuration.
+        :rtype: LKEClusterControlPlaneACL
+        """
+
+        if not hasattr(self, "_control_plane_acl"):
+            result = self._client.get(
+                f"{LKECluster.api_endpoint}/control_plane_acl", model=self
+            )
+
+            self._control_plane_acl = result.get("acl")
+
+        return LKEClusterControlPlaneACL.from_json(self._control_plane_acl)
+
+    @property
+    def apl_console_url(self) -> Optional[str]:
+        """
+        Returns the URL of this cluster's APL installation if this cluster
+        is APL-enabled, else None.
+
+        :returns: The URL of the APL console for this cluster.
+        :rtype: str or None
+        """
+
+        if not self.apl_enabled:
+            return None
+
+        return f"https://console.lke{self.id}.akamai-apl.net"
+
+    @property
+    def apl_health_check_url(self) -> Optional[str]:
+        """
+        Returns the URL of this cluster's APL health check endpoint if this cluster
+        is APL-enabled, else None.
+
+        :returns: The URL of the APL health check endpoint for this cluster.
+        :rtype: str or None
+        """
+
+        if not self.apl_enabled:
+            return None
+
+        return f"https://auth.lke{self.id}.akamai-apl.net/ready"
+
+    def node_pool_create(
+        self,
+        node_type: Union[Type, str],
+        node_count: int,
+        labels: Optional[Dict[str, str]] = None,
+        taints: List[Union[LKENodePoolTaint, Dict[str, Any]]] = None,
+        k8s_version: Optional[
+            Union[str, KubeVersion, TieredKubeVersion]
+        ] = None,
+        update_strategy: Optional[str] = None,
+        label: str = None,
+        disk_encryption: Optional[
+            Union[str, InstanceDiskEncryptionType]
+        ] = None,
+        **kwargs,
+    ):
+        """
+        Creates a new :any:`LKENodePool` for this cluster.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/post-lke-cluster-pools
+
+        :param node_type: The type of nodes to create in this pool.
+        :type node_type: :any:`Type` or str
+        :param node_count: The number of nodes to create in this pool.
+        :type node_count: int
+        :param labels: A dict mapping labels to their values to apply to this pool.
+        :type labels: Dict[str, str]
+        :param taints: A list of taints to apply to this pool.
+        :type taints: List of :any:`LKENodePoolTaint` or dict.
+        :param k8s_version: The Kubernetes version to use for this pool.
+                            NOTE: This field is specific to enterprise clusters.
+        :type k8s_version: str, KubeVersion, or TieredKubeVersion
+        :param update_strategy: The strategy to use when updating this node pool.
+                                NOTE: This field is specific to enterprise clusters.
+        :type update_strategy: str
+        :param label: The name of the node pool.
+        :type label: str
+        :param disk_encryption: Local disk encryption setting for this LKE node pool.
+                                One of 'enabled' or 'disabled'. Defaults to 'disabled'.
+        :type disk_encryption: str or InstanceDiskEncryptionType
+        :param kwargs: Any other arguments to pass to the API. See the API docs
+                       for possible values.
+
+        :returns: The new Node Pool
+        :rtype: LKENodePool
+        """
+        params = {
+            "type": node_type,
+            "label": label,
+            "count": node_count,
+            "labels": labels,
+            "taints": taints,
+            "k8s_version": k8s_version,
+            "update_strategy": update_strategy,
+            "disk_encryption": disk_encryption,
+        }
+        params.update(kwargs)
+
+        result = self._client.post(
+            "{}/pools".format(LKECluster.api_endpoint),
+            model=self,
+            data=drop_null_keys(_flatten_request_body_recursive(params)),
+        )
+        self.invalidate()
+
+        if "id" not in result:
+            raise UnexpectedResponseError(
+                "Unexpected response creating node pool!", json=result
+            )
+
+        return LKENodePool(self._client, result["id"], self.id, result)
+
+    def cluster_dashboard_url_view(self):
+        """
+        Get a Kubernetes Dashboard access URL for this Cluster.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-cluster-dashboard
+
+        :returns: The Kubernetes Dashboard access URL for this Cluster.
+        :rtype: str
+        """
+
+        result = self._client.get(
+            "{}/dashboard".format(LKECluster.api_endpoint), model=self
+        )
+
+        return result["url"]
+
+    def kubeconfig_delete(self):
+        """
+        Delete and regenerate the Kubeconfig file for a Cluster.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-lke-cluster-kubeconfig
+        """
+
+        self._client.delete(
+            "{}/kubeconfig".format(LKECluster.api_endpoint), model=self
+        )
+
+    def node_view(self, nodeId):
+        """
+        Get a specific Node by ID.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-lke-cluster-node
+
+        :param nodeId: ID of the Node to look up.
+        :type nodeId: str
+
+        :returns: The specified Node
+        :rtype: LKENodePoolNode
+        """
+
+        node = self._client.get(
+            "{}/nodes/{}".format(
+                LKECluster.api_endpoint, parse.quote(str(nodeId))
+            ),
+            model=self,
+        )
+
+        return LKENodePoolNode(self._client, node)
+
+    def node_delete(self, nodeId):
+        """
+        Delete a specific Node from a Node Pool.
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-lke-cluster-node + + :param nodeId: ID of the Node to delete. + :type nodeId: str + """ + + self._client.delete( + "{}/nodes/{}".format( + LKECluster.api_endpoint, parse.quote(str(nodeId)) + ), + model=self, + ) + + def node_recycle(self, nodeId): + """ + Recycle a specific Node from an LKE cluster. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-lke-cluster-node-recycle + + :param nodeId: ID of the Node to recycle. + :type nodeId: str + """ + + self._client.post( + "{}/nodes/{}/recycle".format( + LKECluster.api_endpoint, parse.quote(str(nodeId)) + ), + model=self, + ) + + def cluster_nodes_recycle(self): + """ + Recycles all nodes in all pools of a designated Kubernetes Cluster. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-lke-cluster-recycle + """ + + self._client.post( + "{}/recycle".format(LKECluster.api_endpoint), model=self + ) + + def cluster_regenerate(self): + """ + Regenerate the Kubeconfig file and/or the service account token for a Cluster. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-lke-cluster-regenerate + """ + + self._client.post( + "{}/regenerate".format(LKECluster.api_endpoint), model=self + ) + + def service_token_delete(self): + """ + Delete and regenerate the service account token for a Cluster. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-lke-service-token + """ + + self._client.delete( + "{}/servicetoken".format(LKECluster.api_endpoint), model=self + ) + + def control_plane_acl_update( + self, acl: Union[LKEClusterControlPlaneACLOptions, Dict[str, Any]] + ) -> LKEClusterControlPlaneACL: + """ + Updates the ACL configuration for this cluster's control plane. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/put-lke-cluster-acl + + :param acl: The ACL configuration to apply to this cluster. 
+ :type acl: LKEClusterControlPlaneACLOptions or Dict[str, Any] + + :returns: The updated control plane ACL configuration. + :rtype: LKEClusterControlPlaneACL + """ + if isinstance(acl, LKEClusterControlPlaneACLOptions): + acl = acl.dict + + result = self._client.put( + f"{LKECluster.api_endpoint}/control_plane_acl", + model=self, + data={"acl": drop_null_keys(acl)}, + ) + + acl = result.get("acl") + + self._control_plane_acl = result.get("acl") + + return LKEClusterControlPlaneACL.from_json(acl) + + def control_plane_acl_delete(self): + """ + Deletes the ACL configuration for this cluster's control plane. + This has the same effect as calling control_plane_acl_update with the `enabled` field + set to False. Access controls are disabled and all rules are deleted. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/delete-lke-cluster-acl + """ + self._client.delete( + f"{LKECluster.api_endpoint}/control_plane_acl", model=self + ) + + # Invalidate the cache so it is automatically refreshed on next access + if hasattr(self, "_control_plane_acl"): + del self._control_plane_acl diff --git a/linode_api4/objects/lock.py b/linode_api4/objects/lock.py new file mode 100644 index 000000000..9cee64517 --- /dev/null +++ b/linode_api4/objects/lock.py @@ -0,0 +1,47 @@ +from dataclasses import dataclass + +from linode_api4.objects.base import Base, Property +from linode_api4.objects.serializable import JSONObject, StrEnum + +__all__ = ["LockType", "LockEntity", "Lock"] + + +class LockType(StrEnum): + """ + LockType defines valid values for resource lock types. + + API Documentation: TBD + """ + + cannot_delete = "cannot_delete" + cannot_delete_with_subresources = "cannot_delete_with_subresources" + + +@dataclass +class LockEntity(JSONObject): + """ + Represents the entity that is locked. 
+ + API Documentation: TBD + """ + + id: int = 0 + type: str = "" + label: str = "" + url: str = "" + + +class Lock(Base): + """ + A resource lock that prevents deletion or modification of a resource. + + API Documentation: TBD + """ + + api_endpoint = "/locks/{id}" + + properties = { + "id": Property(identifier=True), + "lock_type": Property(), + "entity": Property(json_object=LockEntity), + } diff --git a/linode_api4/objects/longview.py b/linode_api4/objects/longview.py new file mode 100644 index 000000000..7a1ed56d5 --- /dev/null +++ b/linode_api4/objects/longview.py @@ -0,0 +1,55 @@ +from linode_api4.objects import Base, Property + + +class LongviewClient(Base): + """ + A Longview Client that is accessible for use. Longview is Linodeโ€™s system data graphing service. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-longview-client + """ + + api_endpoint = "/longview/clients/{id}" + + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "label": Property(mutable=True), + "install_code": Property(), + "apps": Property(), + "api_key": Property(), + } + + +class LongviewSubscription(Base): + """ + Contains the Longview Plan details for a specific subscription id. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-longview-subscription + """ + + api_endpoint = "/longview/subscriptions/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(), + "clients_included": Property(), + "price": Property(), + } + + +class LongviewPlan(Base): + """ + The current Longview Plan an account is using. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-longview-plan + """ + + api_endpoint = "/longview/plan" + + properties = { + "id": Property(identifier=True), + "label": Property(), + "clients_included": Property(), + "price": Property(), + } diff --git a/linode_api4/objects/monitor.py b/linode_api4/objects/monitor.py new file mode 100644 index 000000000..ca8f83921 --- /dev/null +++ b/linode_api4/objects/monitor.py @@ -0,0 +1,507 @@ +from dataclasses import dataclass, field +from typing import List, Optional, Union + +from linode_api4.objects import DerivedBase +from linode_api4.objects.base import Base, Property +from linode_api4.objects.serializable import JSONObject, StrEnum + +__all__ = [ + "AggregateFunction", + "Alert", + "AlertChannel", + "AlertDefinition", + "AlertType", + "Alerts", + "MonitorDashboard", + "MonitorMetricsDefinition", + "MonitorService", + "MonitorServiceToken", + "RuleCriteria", + "TriggerConditions", +] + + +class AggregateFunction(StrEnum): + """ + Enum for supported aggregate functions. + """ + + min = "min" + max = "max" + avg = "avg" + sum = "sum" + count = "count" + rate = "rate" + increase = "increase" + last = "last" + + +class ChartType(StrEnum): + """ + Enum for supported chart types. + """ + + line = "line" + area = "area" + + +class ServiceType(StrEnum): + """ + Enum for supported service types. + """ + + dbaas = "dbaas" + linode = "linode" + lke = "lke" + vpc = "vpc" + nodebalancer = "nodebalancer" + firewall = "firewall" + object_storage = "object_storage" + aclb = "aclb" + net_load_balancer = "netloadbalancer" + + +class MetricType(StrEnum): + """ + Enum for supported metric type + """ + + gauge = "gauge" + counter = "counter" + histogram = "histogram" + summary = "summary" + + +class CriteriaCondition(StrEnum): + """ + Enum for supported CriteriaCondition + Currently, only ALL is supported. + """ + + all = "ALL" + + +class MetricUnit(StrEnum): + """ + Enum for supported metric units. 
+ """ + + COUNT = "count" + PERCENT = "percent" + BYTE = "byte" + SECOND = "second" + BITS_PER_SECOND = "bits_per_second" + MILLISECOND = "millisecond" + KB = "KB" + MB = "MB" + GB = "GB" + RATE = "rate" + BYTES_PER_SECOND = "bytes_per_second" + PERCENTILE = "percentile" + RATIO = "ratio" + OPS_PER_SECOND = "ops_per_second" + IOPS = "iops" + KILO_BYTES_PER_SECOND = "kilo_bytes_per_second" + SESSIONS_PER_SECOND = "sessions_per_second" + PACKETS_PER_SECOND = "packets_per_second" + KILO_BITS_PER_SECOND = "kilo_bits_per_second" + + +class DashboardType(StrEnum): + """ + Enum for supported dashboard types. + """ + + standard = "standard" + custom = "custom" + + +class AlertStatus(StrEnum): + """ + Enum for supported alert status values. + """ + + AlertDefinitionStatusProvisioning = "provisioning" + AlertDefinitionStatusEnabling = "enabling" + AlertDefinitionStatusDisabling = "disabling" + AlertDefinitionStatusEnabled = "enabled" + AlertDefinitionStatusDisabled = "disabled" + AlertDefinitionStatusFailed = "failed" + + +@dataclass +class Filter(JSONObject): + """ + Represents a filter in the filters list of a dashboard widget. + """ + + dimension_label: str = "" + operator: str = "" + value: str = "" + + +@dataclass +class DashboardWidget(JSONObject): + """ + Represents a single widget in the widgets list. 
+ """ + + metric: str = "" + unit: MetricUnit = "" + label: str = "" + color: str = "" + size: int = 0 + chart_type: ChartType = "" + y_label: str = "" + aggregate_function: AggregateFunction = "" + group_by: Optional[List[str]] = None + _filters: Optional[List[Filter]] = field( + default=None, metadata={"json_key": "filters"} + ) + + def __getattribute__(self, name): + """Override to handle the filters attribute specifically to avoid metaclass conflict.""" + if name == "filters": + return object.__getattribute__(self, "_filters") + return object.__getattribute__(self, name) + + def __setattr__(self, name, value): + """Override to handle setting the filters attribute.""" + if name == "filters": + object.__setattr__(self, "_filters", value) + else: + object.__setattr__(self, name, value) + + +@dataclass +class ServiceAlert(JSONObject): + """ + Represents alert configuration options for a monitor service. + """ + + polling_interval_seconds: Optional[List[int]] = None + evaluation_period_seconds: Optional[List[int]] = None + scope: Optional[List[str]] = None + + +@dataclass +class Dimension(JSONObject): + """ + Represents a single dimension in the dimensions list. + """ + + dimension_label: Optional[str] = None + label: Optional[str] = None + values: Optional[List[str]] = None + + +@dataclass +class MonitorMetricsDefinition(JSONObject): + """ + Represents a single metric definition in the metrics definition list. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-information + """ + + metric: str = "" + label: str = "" + metric_type: MetricType = "" + unit: MetricUnit = "" + scrape_interval: int = 0 + is_alertable: bool = False + dimensions: Optional[List[Dimension]] = None + available_aggregate_functions: Optional[List[AggregateFunction]] = None + + +class MonitorDashboard(Base): + """ + Dashboard details. 
+ + List dashboards: https://techdocs.akamai.com/linode-api/get-dashboards-all + """ + + api_endpoint = "/monitor/dashboards/{id}" + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "label": Property(), + "service_type": Property(ServiceType), + "type": Property(DashboardType), + "widgets": Property(json_object=DashboardWidget), + "updated": Property(is_datetime=True), + } + + +class MonitorService(Base): + """ + Represents a single service type. + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-monitor-services + + """ + + api_endpoint = "/monitor/services/{service_type}" + id_attribute = "service_type" + properties = { + "service_type": Property(ServiceType), + "label": Property(), + "alert": Property(json_object=ServiceAlert), + } + + +@dataclass +class MonitorServiceToken(JSONObject): + """ + A token for the requested service_type. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-get-token + """ + + token: str = "" + + +@dataclass +class TriggerConditions(JSONObject): + """ + Represents the trigger/evaluation configuration for an alert. + + Expected JSON example: + "trigger_conditions": { + "criteria_condition": "ALL", + "evaluation_period_seconds": 60, + "polling_interval_seconds": 10, + "trigger_occurrences": 3 + } + + Fields: + - criteria_condition: "ALL" (currently, only "ALL" is supported) + - evaluation_period_seconds: seconds over which the rule(s) are evaluated + - polling_interval_seconds: how often metrics are sampled (seconds) + - trigger_occurrences: how many consecutive evaluation periods must match to trigger + """ + + criteria_condition: CriteriaCondition = CriteriaCondition.all + evaluation_period_seconds: int = 0 + polling_interval_seconds: int = 0 + trigger_occurrences: int = 0 + + +@dataclass +class DimensionFilter(JSONObject): + """ + A single dimension filter used inside a Rule. 
+ + Example JSON: + { + "dimension_label": "node_type", + "label": "Node Type", + "operator": "eq", + "value": "primary" + } + """ + + dimension_label: str = "" + label: str = "" + operator: str = "" + value: Optional[str] = None + + +@dataclass +class Rule(JSONObject): + """ + A single rule within RuleCriteria. + Example JSON: + { + "aggregate_function": "avg", + "dimension_filters": [ ... ], + "label": "Memory Usage", + "metric": "memory_usage", + "operator": "gt", + "threshold": 95, + "unit": "percent" + } + """ + + aggregate_function: Optional[Union[AggregateFunction, str]] = None + dimension_filters: Optional[List[DimensionFilter]] = None + label: str = "" + metric: str = "" + operator: str = "" + threshold: Optional[float] = None + unit: Optional[str] = None + + +@dataclass +class RuleCriteria(JSONObject): + """ + Container for a list of Rule objects, matching the JSON shape: + "rule_criteria": { "rules": [ { ... }, ... ] } + """ + + rules: Optional[List[Rule]] = None + + +@dataclass +class Alert(JSONObject): + """ + Represents an alert definition reference within an AlertChannel. + + Fields: + - id: int - Unique identifier of the alert definition. + - label: str - Human-readable name for the alert definition. + - type: str - Type of the alert (e.g., 'alerts-definitions'). + - url: str - API URL for the alert definition. + """ + + id: int = 0 + label: str = "" + _type: str = field(default="", metadata={"json_key": "type"}) + url: str = "" + + +@dataclass +class Alerts(JSONObject): + """ + Represents a collection of alert definitions within an AlertChannel. + + Fields: + - items: List[Alert] - List of alert definitions. + """ + + items: List[Alert] = field(default_factory=list) + + +class AlertType(StrEnum): + """ + Enumeration of alert origin types used by alert definitions. + + Values: + - system: Alerts that originate from the system (built-in or platform-managed). + - user: Alerts created and managed by users (custom alerts). 
+ + The API uses this value in the `type` field of alert-definition responses. + This enum can be used to compare or validate the `type` value when + processing alert definitions. + """ + + system = "system" + user = "user" + + +class AlertDefinition(DerivedBase): + """ + Represents an alert definition for a monitor service. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-alert-definition + """ + + api_endpoint = "/monitor/services/{service_type}/alert-definitions/{id}" + derived_url_path = "alert-definitions" + parent_id_name = "service_type" + id_attribute = "id" + + properties = { + "id": Property(identifier=True), + "service_type": Property(identifier=True), + "label": Property(mutable=True), + "severity": Property(mutable=True), + "type": Property(mutable=True), + "status": Property(mutable=True), + "has_more_resources": Property(mutable=True), + "rule_criteria": Property(mutable=True, json_object=RuleCriteria), + "trigger_conditions": Property( + mutable=True, json_object=TriggerConditions + ), + "alert_channels": Property(mutable=True, json_object=Alerts), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "updated_by": Property(), + "created_by": Property(), + "entity_ids": Property(mutable=True), + "description": Property(mutable=True), + "service_class": Property(alias_of="class"), + } + + +@dataclass +class EmailChannelContent(JSONObject): + """ + Represents the content for an email alert channel. + """ + + email_addresses: Optional[List[str]] = None + + +@dataclass +class ChannelContent(JSONObject): + """ + Represents the content block for an AlertChannel, which varies by channel type. + """ + + email: Optional[EmailChannelContent] = None + # Other channel types like 'webhook', 'slack' could be added here as Optional fields. + + +@dataclass +class EmailDetails(JSONObject): + """ + Represents email-specific details for an alert channel. 
+ """ + + usernames: Optional[List[str]] = None + recipient_type: Optional[str] = None + + +@dataclass +class ChannelDetails(JSONObject): + """ + Represents the details block for an AlertChannel, which varies by channel type. + """ + + email: Optional[EmailDetails] = None + + +@dataclass +class AlertInfo(JSONObject): + """ + Represents a reference to alerts associated with an alert channel. + Fields: + - url: str - API URL to fetch the alerts for this channel + - type: str - Type identifier (e.g., 'alerts-definitions') + - alert_count: int - Number of alerts associated with this channel + """ + + url: str = "" + _type: str = field(default="", metadata={"json_key": "type"}) + alert_count: int = 0 + + +class AlertChannel(Base): + """ + Represents an alert channel used to deliver notifications when alerts + fire. Alert channels define a destination and configuration for + notifications (for example: email lists, webhooks, PagerDuty, Slack, etc.). + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-alert-channels + + This class maps to the Monitor API's `/monitor/alert-channels` resource + and is used by the SDK to list, load, and inspect channels. + + NOTE: Only read operations are supported for AlertChannel at this time. + Create, update, and delete (CRUD) operations are not allowed. 
+ """ + + api_endpoint = "/monitor/alert-channels/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(), + "type": Property(), + "channel_type": Property(), + "details": Property(mutable=False, json_object=ChannelDetails), + "alerts": Property(mutable=False, json_object=AlertInfo), + "content": Property(mutable=False, json_object=ChannelContent), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "created_by": Property(), + "updated_by": Property(), + } diff --git a/linode_api4/objects/monitor_api.py b/linode_api4/objects/monitor_api.py new file mode 100644 index 000000000..c3496668c --- /dev/null +++ b/linode_api4/objects/monitor_api.py @@ -0,0 +1,44 @@ +__all__ = [ + "EntityMetrics", + "EntityMetricsData", + "EntityMetricsDataResult", + "EntityMetricsStats", + "EntityMetricOptions", +] +from dataclasses import dataclass, field +from typing import List, Optional + +from linode_api4.objects.monitor import AggregateFunction +from linode_api4.objects.serializable import JSONObject + + +@dataclass +class EntityMetricsStats(JSONObject): + executionTimeMsec: int = 0 + seriesFetched: str = "" + + +@dataclass +class EntityMetricsDataResult(JSONObject): + metric: dict = field(default_factory=dict) + values: list = field(default_factory=list) + + +@dataclass +class EntityMetricsData(JSONObject): + result: Optional[List[EntityMetricsDataResult]] = None + resultType: str = "" + + +@dataclass +class EntityMetrics(JSONObject): + data: Optional[EntityMetricsData] = None + isPartial: bool = False + stats: Optional[EntityMetricsStats] = None + status: str = "" + + +@dataclass +class EntityMetricOptions(JSONObject): + name: str = "" + aggregate_function: AggregateFunction = "" diff --git a/linode_api4/objects/networking.py b/linode_api4/objects/networking.py new file mode 100644 index 000000000..ed975ab71 --- /dev/null +++ b/linode_api4/objects/networking.py @@ -0,0 +1,426 @@ +from dataclasses import dataclass, field 
+    .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`.
+ + When attempting to reset the `rdns` field to default, consider using the ExplicitNullValue class:: + + ip = IPAddress(client, "127.0.0.1") + ip.rdns = ExplicitNullValue + ip.save() + + # Re-populate all attributes with new information from the API + ip.invalidate() + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ip + """ + + api_endpoint = "/networking/ips/{address}" + id_attribute = "address" + + properties = { + "address": Property(identifier=True), + "gateway": Property(), + "subnet_mask": Property(), + "prefix": Property(), + "type": Property(), + "public": Property(), + "rdns": Property(mutable=True), + "linode_id": Property(), + "interface_id": Property(), + "region": Property(slug_relationship=Region), + "vpc_nat_1_1": Property(json_object=InstanceIPNAT1To1), + } + + @property + def linode(self): + from .linode import Instance # pylint: disable-all + + if not hasattr(self, "_linode"): + self._set("_linode", Instance(self._client, self.linode_id)) + + return self._linode + + @property + def interface(self) -> Optional["LinodeInterface"]: + """ + Returns the Linode Interface associated with this IP address. + + NOTE: This function will only return Linode interfaces, not Config interfaces. + + NOTE: Linode interfaces may not currently be available to all users. + + :returns: The Linode Interface associated with this IP address. + :rtype: LinodeInterface + """ + + from .linode_interfaces import LinodeInterface # pylint: disable-all + + if self.interface_id in (None, 0): + self._set("_interface", None) + elif not hasattr(self, "_interface"): + self._set( + "_interface", + LinodeInterface( + self._client, self.linode_id, self.interface_id + ), + ) + + return self._interface + + def to(self, linode): + """ + This is a helper method for ip-assign, and should not be used outside + of that context. It's used to cleanly build an IP Assign request with + pretty python syntax. 
+ """ + from .linode import Instance # pylint: disable-all + + if not isinstance(linode, Instance): + raise ValueError("IP Address can only be assigned to a Linode!") + + return {"address": self.address, "linode_id": linode.id} + + def delete(self): + """ + Override the delete() function from Base to use the correct endpoint. + """ + resp = self._client.delete( + "/linode/instances/{}/ips/{}".format(self.linode_id, self.address), + model=self, + ) + + if "error" in resp: + return False + self.invalidate() + return True + + +@dataclass +class VPCIPAddressIPv6(JSONObject): + slaac_address: str = "" + + +@dataclass +class VPCIPAddress(JSONObject): + """ + VPCIPAddress represents the IP address of a VPC. + + NOTE: This is not implemented as a typical API object (Base) because VPC IPs + cannot be refreshed through the /networking/ips/{address} endpoint. + """ + + address: str = "" + gateway: str = "" + region: str = "" + subnet_mask: str = "" + vpc_id: int = 0 + subnet_id: int = 0 + linode_id: int = 0 + config_id: int = 0 + interface_id: int = 0 + prefix: int = 0 + + active: bool = False + + address_range: Optional[str] = None + nat_1_1: Optional[str] = None + + ipv6_range: Optional[str] = None + ipv6_is_public: Optional[bool] = None + ipv6_addresses: Optional[List[VPCIPAddressIPv6]] = None + + +class VLAN(Base): + """ + .. note:: At this time, the Linode API only supports listing VLANs. + .. note:: This endpoint is in beta. This will only function if base_url is set to `https://api.linode.com/v4beta`. + + An instance of a Linode VLAN. + VLANs provide a mechanism for secure communication between two or more Linodes that are assigned to the same VLAN. + VLANs are implicitly created during Instance or Instance Config creation. 
+    Represents devices to be created alongside a Linode Firewall.
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-firewall-rules
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-firewall-rule-versions + + :returns: Lists the current and historical rules of the firewall (that is not deleted), + using version. Whenever the rules update, the version increments from 1. + :rtype: dict + """ + return self._client.get( + "{}/history".format(self.api_endpoint), model=self + ) + + def get_rule_version(self, version): + """ + Gets the JSON for a specific rule version for this Firewall. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-firewall-rule-version + + :param version: The firewall rule version to view. + :type version: int + + :returns: Gets a specific firewall rule version for an enabled or disabled firewall. + :rtype: dict + """ + return self._client.get( + "{}/history/rules/{}".format(self.api_endpoint, version), model=self + ) + + def device_create(self, id, type="linode", **kwargs): + """ + Creates and attaches a device to this Firewall + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-firewall-device + + :param id: The ID of the entity to create a device for. + :type id: int + + :param type: The type of entity the device is being created for. (`linode`) + :type type: str + """ + params = { + "id": id, + "type": type, + } + params.update(kwargs) + + result = self._client.post( + "{}/devices".format(Firewall.api_endpoint), model=self, data=params + ) + self.invalidate() + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response creating device!", json=result + ) + + c = FirewallDevice(self._client, result["id"], self.id, result) + return c + + +class FirewallTemplate(Base): + """ + Represents a single Linode Firewall template. + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-firewall-template + + NOTE: This feature may not currently be available to all users. 
+    The information about a single Node, a backend for this NodeBalancer's configured port.
+ + API documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancer-node + """ + + api_endpoint = ( + "/nodebalancers/{nodebalancer_id}/configs/{config_id}/nodes/{id}" + ) + derived_url_path = "nodes" + parent_id_name = "config_id" + + properties = { + "id": Property(identifier=True), + "config_id": Property(identifier=True), + "nodebalancer_id": Property(identifier=True), + "label": Property(mutable=True), + "address": Property(mutable=True), + "weight": Property(mutable=True), + "mode": Property(mutable=True), + "status": Property(), + "tags": Property(mutable=True, unordered=True), + } + + def __init__(self, client, id, parent_id, nodebalancer_id=None, json=None): + """ + We need a special constructor here because this object's parent + has a parent itself. + """ + if not nodebalancer_id and not isinstance(parent_id, tuple): + raise ValueError( + "NodeBalancerNode must either be created with a nodebalancer_id or a tuple of " + "(config_id, nodebalancer_id) for parent_id!" + ) + + if isinstance(parent_id, tuple): + nodebalancer_id = parent_id[1] + parent_id = parent_id[0] + + DerivedBase.__init__(self, client, id, parent_id, json=json) + + self._set("nodebalancer_id", nodebalancer_id) + + +class NodeBalancerConfig(DerivedBase): + """ + The configuration information for a single port of this NodeBalancer. + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancer-config + + NOTE: UDP NodeBalancer Configs may not currently be available to all users. 
+ """ + + api_endpoint = "/nodebalancers/{nodebalancer_id}/configs/{id}" + derived_url_path = "configs" + parent_id_name = "nodebalancer_id" + + properties = { + "id": Property(identifier=True), + "nodebalancer_id": Property(identifier=True), + "port": Property(mutable=True), + "protocol": Property(mutable=True), + "algorithm": Property(mutable=True), + "stickiness": Property(mutable=True), + "check": Property(mutable=True), + "check_interval": Property(mutable=True), + "check_timeout": Property(mutable=True), + "check_attempts": Property(mutable=True), + "check_path": Property(mutable=True), + "check_body": Property(mutable=True), + "check_passive": Property(mutable=True), + "udp_check_port": Property(mutable=True), + "udp_session_timeout": Property(), + "ssl_cert": Property(mutable=True), + "ssl_key": Property(mutable=True), + "ssl_commonname": Property(), + "ssl_fingerprint": Property(), + "cipher_suite": Property(mutable=True), + "nodes_status": Property(), + "proxy_protocol": Property(mutable=True), + } + + def _serialize(self, is_put: bool = False): + """ + This override removes the `cipher_suite` field from the PUT request + body on calls to save(...) for UDP configs, which is rejected by + the API. + """ + + result = super()._serialize(is_put) + + if is_put and result["protocol"] == "udp" and "cipher_suite" in result: + del result["cipher_suite"] + + return result + + @property + def nodes(self): + """ + This is a special derived_class relationship because NodeBalancerNode is the + only api object that requires two parent_ids + + Returns a paginated list of NodeBalancer nodes associated with this Config. + These are the backends that will be sent traffic for this port. + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancer-config-nodes + + :returns: A paginated list of NodeBalancer nodes. 
+ :rtype: PaginatedList of NodeBalancerNode + """ + if not hasattr(self, "_nodes"): + base_url = "{}/{}".format( + NodeBalancerConfig.api_endpoint, + NodeBalancerNode.derived_url_path, + ) + result = self._client._get_objects( + base_url, + NodeBalancerNode, + model=self, + parent_id=(self.id, self.nodebalancer_id), + ) + + self._set("_nodes", result) + + return self._nodes + + def node_create(self, label, address, **kwargs): + """ + Creates a NodeBalancer Node, a backend that can accept traffic for this + NodeBalancer Config. Nodes are routed requests on the configured port based + on their status. + + API documentation: https://techdocs.akamai.com/linode-api/reference/post-node-balancer-node + + :param address: The private IP Address where this backend can be reached. + This must be a private IP address. + :type address: str + + :param label: The label for this node. This is for display purposes only. + Must have a length between 2 and 32 characters. + :type label: str + + :returns: The node which is created successfully. + :rtype: NodeBalancerNode + """ + params = { + "label": label, + "address": address, + } + params.update(kwargs) + + result = self._client.post( + "{}/nodes".format(NodeBalancerConfig.api_endpoint), + model=self, + data=params, + ) + self.invalidate() + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response creating node!", json=result + ) + + # this is three levels deep, so we need a special constructor + n = NodeBalancerNode( + self._client, result["id"], self.id, self.nodebalancer_id, result + ) + return n + + def load_ssl_data(self, cert_file, key_file): + """ + A convenience method that loads a cert and a key from files and sets them + on this object. This can make enabling ssl easier (instead of you needing + to load the files yourself). + + This does *not* change protocol/port for you, or save anything. Once this + is called, you must still call `save()` on this object for the changes to + take effect. 
+ + :param cert_file: A path to the file containing the public certificate + :type cert_file: str + :param key_file: A path to the file containing the unpassphrased private key + :type key_file: str + """ + # access a server-loaded field to ensure this object is loaded from the + # server before setting values. Failing to do this can cause an unloaded + # object to overwrite these values on a subsequent load, which happens to + # occur on a save() + _ = self.ssl_fingerprint + + # we're disabling warnings here because these attributes are defined dynamically + # through linode.objects.Base, and pylint isn't privy + cert_path = Path(cert_file).expanduser() + if cert_path.is_file(): + with open(cert_path) as f: + self.ssl_cert = f.read() + + key_path = Path(key_file).expanduser() + if key_path.is_file(): + with open(key_path) as f: + self.ssl_key = f.read() + + +class NodeBalancer(Base): + """ + A single NodeBalancer you can access. + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancer + """ + + api_endpoint = "/nodebalancers/{id}" + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "hostname": Property(), + "client_conn_throttle": Property(mutable=True), + "status": Property(), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "ipv4": Property(relationship=IPAddress), + "ipv6": Property(), + "region": Property(slug_relationship=Region), + "configs": Property(derived_class=NodeBalancerConfig), + "transfer": Property(), + "tags": Property(mutable=True, unordered=True), + "client_udp_sess_throttle": Property(mutable=True), + "locks": Property(unordered=True), + } + + # create derived objects + def config_create(self, **kwargs): + """ + Creates a NodeBalancer Config, which allows the NodeBalancer to accept traffic + on a new port. You will need to add NodeBalancer Nodes to the new Config before + it can actually serve requests. 
+        Use this command to update a NodeBalancer's Config and Nodes with a single request.
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-node-balancer-firewalls
+ + NOTE: This is not implemented as a typical API object (Base) because Object Storage Endpoints + cannot be refreshed, as there is no singular GET endpoint. + """ + + region: str = "" + endpoint_type: ObjectStorageEndpointType = "" + s3_endpoint: Optional[str] = None + + +@dataclass +class ObjectStorageQuotaUsage(JSONObject): + """ + ObjectStorageQuotaUsage contains the fields of an object storage quota usage information. + """ + + quota_limit: int = 0 + usage: int = 0 + + +class ObjectStorageType(Base): + """ + An ObjectStorageType represents the structure of a valid Object Storage type. + Currently, the ObjectStorageType can only be retrieved by listing, i.e.: + types = client.object_storage.types() + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-types + """ + + properties = { + "id": Property(identifier=True), + "label": Property(), + "price": Property(json_object=Price), + "region_prices": Property(json_object=RegionPrice), + "transfer": Property(), + } + + +class ObjectStorageBucket(DerivedBase): + """ + A bucket where objects are stored in. + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-bucket + """ + + api_endpoint = "/object-storage/buckets/{region}/{label}" + parent_id_name = "region" + id_attribute = "label" + + properties = { + "region": Property(identifier=True), + "cluster": Property(), + "created": Property(is_datetime=True), + "hostname": Property(), + "label": Property(identifier=True), + "objects": Property(), + "size": Property(), + "endpoint_type": Property(), + "s3_endpoint": Property(), + } + + @classmethod + def api_list(cls): + """ + Override this method to return the correct URL that will produce + a list of JSON objects of this class' type - Object Storage Bucket. 
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-bucket-access
+        Deletes this Object Storage bucket's user uploaded TLS/SSL certificate
+        and private key.
+        :param certificate: Your Base64 encoded and PEM formatted SSL certificate.
+                            Line breaks must be represented as "\n" in the string
+                            for requests (but not when using the Linode CLI)
+ Line breaks must be represented as โ€œ\nโ€ in the string + for requests (but not when using the Linode CLI) + :type private_key: str + + :returns: A result object which has a bool field indicating if this Bucket has a corresponding + TLS/SSL certificate that was uploaded by an Account user. + :rtype: MappedObject + """ + params = { + "certificate": certificate, + "private_key": private_key, + } + result = self._client.post( + f"{self.api_endpoint}/ssl", + data=params, + model=self, + ) + + if not "ssl" in result: + raise UnexpectedResponseError( + "Unexpected response when uploading TLS/SSL certs!", + json=result, + ) + + return MappedObject(**result) + + def contents( + self, + marker=None, + delimiter=None, + prefix=None, + page_size=100, + ): + """ + Returns the contents of a bucket. + The contents are paginated using a marker, which is the name of the last object + on the previous page. Objects may be filtered by prefix and delimiter as well; + see Query Parameters for more information. + + This endpoint is available for convenience. + It is recommended that instead you use the more fully-featured S3 API directly. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-bucket-content + + :param marker: The โ€œmarkerโ€ for this request, which can be used to paginate + through large buckets. Its value should be the value of the + next_marker property returned with the last page. Listing + bucket contents does not support arbitrary page access. See the + next_marker property in the responses section for more details. + :type marker: str + + :param delimiter: The delimiter for object names; if given, object names will + be returned up to the first occurrence of this character. + This is most commonly used with the / character to allow + bucket transversal in a manner similar to a filesystem, + however any delimiter may be used. Use in conjunction with + prefix to see object names past the first occurrence of + the delimiter. 
+        :type delimiter: str
+
+        :param prefix: Filters objects returned to only those whose name start with
+                       the given prefix. Commonly used in conjunction with delimiter
+                       to allow transversal of bucket contents in a manner similar to
+                       a filesystem.
+        :type prefix: str
+
+        :param page_size: The number of items to return per page. Defaults to 100.
+        :type page_size: int 25..500
+
+        :returns: A list of the MappedObject of the requested bucket's contents.
+        :rtype: [MappedObject]
+        """
+        params = {
+            "marker": marker,
+            "delimiter": delimiter,
+            "prefix": prefix,
+            "page_size": page_size,
+        }
+        result = self._client.get(
+            f"{self.api_endpoint}/object-list",
+            data=drop_null_keys(params),
+            model=self,
+        )
+
+        if "data" not in result:
+            raise UnexpectedResponseError(
+                "Unexpected response when getting the contents of a bucket!",
+                json=result,
+            )
+
+        return [MappedObject(**c) for c in result["data"]]
+
+    def object_acl_config(self, name=None):
+        """
+        View an Object's configured Access Control List (ACL) in this Object Storage
+        bucket. ACLs define who can access your buckets and objects and specify the
+        level of access granted to those users.
+
+        This endpoint is available for convenience.
+        It is recommended that instead you use the more fully-featured S3 API directly.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-bucket-acl
+
+        :param name: The name of the object for which to retrieve its Access Control
+                     List (ACL). Use the Object Storage Bucket Contents List endpoint
+                     to access all object names in a bucket.
+        :type name: str
+
+        :returns: The Object's canned ACL and policy.
+ :rtype: MappedObject + """ + params = { + "name": name, + } + + result = self._client.get( + f"{type(self).api_endpoint}/object-acl", + model=self, + data=drop_null_keys(params), + ) + + if not "acl" in result: + raise UnexpectedResponseError( + "Unexpected response when viewing Objectโ€™s configured ACL!", + json=result, + ) + + return MappedObject(**result) + + def object_acl_config_update(self, acl: ObjectStorageACL, name): + """ + Update an Objectโ€™s configured Access Control List (ACL) in this Object Storage + bucket. ACLs define who can access your buckets and objects and specify the + level of access granted to those users. + + This endpoint is available for convenience. + It is recommended that instead you use the more fully-featured S3 API directly. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/put-object-storage-bucket-acl + + :param acl: The Access Control Level of the bucket, as a canned ACL string. + For more fine-grained control of ACLs, use the S3 API directly. + :type acl: str + Enum: private,public-read,authenticated-read,public-read-write,custom + + :param name: The name of the object for which to retrieve its Access Control + List (ACL). Use the Object Storage Bucket Contents List endpoint + to access all object names in a bucket. + :type name: str + + :returns: The Object's canned ACL and policy. 
+ :rtype: MappedObject + """ + params = { + "acl": acl, + "name": name, + } + + result = self._client.put( + f"{type(self).api_endpoint}/object-acl", + model=self, + data=params, + ) + + if not "acl" in result: + raise UnexpectedResponseError( + "Unexpected response when updating Objectโ€™s configured ACL!", + json=result, + ) + + return MappedObject(**result) + + @deprecated( + reason=( + "'access' method has been deprecated in favor of the class method " + "'bucket_access' in ObjectStorageGroup, which can be accessed by " + "'client.object_storage.access'" + ) + ) + def access(self, cluster, bucket_name, permissions): + """ + Returns a dict formatted to be included in the `bucket_access` argument + of :any:`keys_create`. See the docs for that method for an example of + usage. + + :param cluster: The Object Storage cluster to grant access in. + :type cluster: :any:`ObjectStorageCluster` or str + :param bucket_name: The name of the bucket to grant access to. + :type bucket_name: str + :param permissions: The permissions to grant. Should be one of "read_only" + or "read_write". + :type permissions: str + + :returns: A dict formatted correctly for specifying bucket access for + new keys. + :rtype: dict + """ + return { + "cluster": cluster, + "bucket_name": bucket_name, + "permissions": permissions, + } + + +@deprecated( + reason="deprecated to use regions list API for viewing available OJB clusters" +) +class ObjectStorageCluster(Base): + """ + This class will be deprecated to use the regions list to view available OBJ clusters, + and a new access key API will directly expose the S3 endpoint hostname. + + A cluster where Object Storage is available. 
+ + API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-cluster + """ + + api_endpoint = "/object-storage/clusters/{id}" + + properties = { + "id": Property(identifier=True), + "region": Property(slug_relationship=Region), + "status": Property(), + "domain": Property(), + "static_site_domain": Property(), + } + + @deprecated( + reason=( + "'buckets_in_cluster' method has been deprecated, please consider " + "switching to 'buckets_in_region' in the object storage group (can " + "be accessed via 'client.object_storage.buckets_in_cluster')." + ) + ) + def buckets_in_cluster(self, *filters): + """ + Returns a list of Buckets in this cluster belonging to this Account. + + This endpoint is available for convenience. + It is recommended that instead you use the more fully-featured S3 API directly. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-bucketin-cluster + + :param filters: Any number of filters to apply to this query. + See :doc:`Filtering Collections` + for more details on filtering. + + :returns: A list of Object Storage Buckets that in the requested cluster. + :rtype: PaginatedList of ObjectStorageBucket + """ + + return self._client._get_and_filter( + ObjectStorageBucket, + *filters, + endpoint="/object-storage/buckets/{}".format( + parse.quote(str(self.id)) + ), + ) + + +class ObjectStorageKeys(Base): + """ + A keypair that allows third-party applications to access Linode Object Storage. + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-key + """ + + api_endpoint = "/object-storage/keys/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "access_key": Property(), + "secret_key": Property(), + "bucket_access": Property(), + "limited": Property(), + "regions": Property(unordered=True), + } + + +class ObjectStorageQuota(Base): + """ + An Object Storage related quota information on your account. 
+ + API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-quota + """ + + api_endpoint = "/object-storage/quotas/{quota_id}" + id_attribute = "quota_id" + + properties = { + "quota_id": Property(identifier=True), + "quota_name": Property(), + "endpoint_type": Property(), + "s3_endpoint": Property(), + "description": Property(), + "quota_limit": Property(), + "resource_metric": Property(), + } + + def usage(self): + """ + Gets usage data for a specific ObjectStorage Quota resource you can have on your account and the current usage for that resource. + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-object-storage-quota-usage + + :returns: The Object Storage Quota usage. + :rtype: ObjectStorageQuotaUsage + """ + + result = self._client.get( + f"{type(self).api_endpoint}/usage", + model=self, + ) + + return ObjectStorageQuotaUsage.from_json(result) diff --git a/linode_api4/objects/placement.py b/linode_api4/objects/placement.py new file mode 100644 index 000000000..e436cf701 --- /dev/null +++ b/linode_api4/objects/placement.py @@ -0,0 +1,128 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +from linode_api4.objects.base import Base, Property +from linode_api4.objects.linode import Instance +from linode_api4.objects.region import Region +from linode_api4.objects.serializable import JSONObject, StrEnum + + +class PlacementGroupType(StrEnum): + """ + An enum class that represents the available types of a Placement Group. + """ + + anti_affinity_local = "anti_affinity:local" + + +class PlacementGroupPolicy(StrEnum): + """ + An enum class that represents the policy for Linode assignments to a Placement Group. + """ + + strict = "strict" + flexible = "flexible" + + +@dataclass +class PlacementGroupMember(JSONObject): + """ + Represents a member of a placement group. 
+ """ + + linode_id: int = 0 + is_compliant: bool = False + + +@dataclass +class MigratedInstance(JSONObject): + """ + The ID for a compute instance being migrated into or out of the placement group. + """ + + linode_id: int = 0 + + +@dataclass +class PlacementGroupMigrations(JSONObject): + """ + Any compute instances that are being migrated to or from the placement group. + Returns an empty object if no migrations are taking place. + """ + + inbound: Optional[List[MigratedInstance]] = None + outbound: Optional[List[MigratedInstance]] = None + + +class PlacementGroup(Base): + """ + NOTE: Placement Groups may not currently be available to all users. + + A VM Placement Group, defining the affinity policy for Linodes + created in a region. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-placement-group + """ + + api_endpoint = "/placement/groups/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "region": Property(slug_relationship=Region), + "placement_group_type": Property(), + "placement_group_policy": Property(), + "is_compliant": Property(), + "members": Property(json_object=PlacementGroupMember), + "migrations": Property(json_object=PlacementGroupMigrations), + } + + def assign( + self, + linodes: List[Union[Instance, int]], + compliant_only: bool = False, + ): + """ + Assigns the specified Linodes to the Placement Group. + + :param linodes: A list of Linodes to assign to the Placement Group. 
+ :type linodes: List[Union[Instance, int]] + """ + params = { + "linodes": [ + v.id if isinstance(v, Instance) else v for v in linodes + ], + "compliant_only": compliant_only, + } + + result = self._client.post( + f"{PlacementGroup.api_endpoint}/assign", model=self, data=params + ) + + # The assign endpoint returns the updated PG, so we can use this + # as an opportunity to refresh the object + self._populate(result) + + def unassign( + self, + linodes: List[Union[Instance, int]], + ): + """ + Unassign the specified Linodes from the Placement Group. + + :param linodes: A list of Linodes to unassign from the Placement Group. + :type linodes: List[Union[Instance, int]] + """ + params = { + "linodes": [ + v.id if isinstance(v, Instance) else v for v in linodes + ], + } + + result = self._client.post( + f"{PlacementGroup.api_endpoint}/unassign", model=self, data=params + ) + + # The unassign endpoint returns the updated PG, so we can use this + # as an opportunity to refresh the object + self._populate(result) diff --git a/linode_api4/objects/profile.py b/linode_api4/objects/profile.py new file mode 100644 index 000000000..c37015e84 --- /dev/null +++ b/linode_api4/objects/profile.py @@ -0,0 +1,241 @@ +from linode_api4.errors import UnexpectedResponseError +from linode_api4.objects import Base, Property + + +class AuthorizedApp(Base): + """ + An application with authorized access to an account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-profile-app + """ + + api_endpoint = "/profile/apps/{id}" + + properties = { + "id": Property(identifier=True), + "scopes": Property(), + "label": Property(), + "created": Property(is_datetime=True), + "expiry": Property(is_datetime=True), + "thumbnail_url": Property(), + "website": Property(), + } + + +class PersonalAccessToken(Base): + """ + A Person Access Token associated with a Profile. 
+
+    API Documentation: https://techdocs.akamai.com/linode-api/reference/get-personal-access-token
+    """
+
+    api_endpoint = "/profile/tokens/{id}"
+
+    properties = {
+        "id": Property(identifier=True),
+        "scopes": Property(),
+        "label": Property(mutable=True),
+        "created": Property(is_datetime=True),
+        "token": Property(),
+        "expiry": Property(is_datetime=True),
+    }
+
+
+class WhitelistEntry(Base):
+    """
+    DEPRECATED: Limited to customers with a feature tag
+    """
+
+    api_endpoint = "/profile/whitelist/{id}"
+
+    properties = {
+        "id": Property(identifier=True),
+        "address": Property(),
+        "netmask": Property(),
+        "note": Property(),
+    }
+
+
+class Profile(Base):
+    """
+    A Profile containing information about the current User.
+
+    API Documentation: https://techdocs.akamai.com/linode-api/reference/get-profile
+    """
+
+    api_endpoint = "/profile"
+    id_attribute = "username"
+
+    properties = {
+        "username": Property(identifier=True),
+        "uid": Property(),
+        "email": Property(mutable=True),
+        "timezone": Property(mutable=True),
+        "email_notifications": Property(mutable=True),
+        "referrals": Property(),
+        "ip_whitelist_enabled": Property(mutable=True),
+        "lish_auth_method": Property(mutable=True),
+        # NOTE: "authorized_keys" was previously listed twice in this dict;
+        # the later, non-mutable duplicate silently overrode this entry
+        # (Python dict literals keep the last value for a repeated key).
+        # Keep the single mutable definition.
+        "authorized_keys": Property(mutable=True),
+        "two_factor_auth": Property(),
+        "restricted": Property(),
+        "authentication_type": Property(),
+        "verified_phone_number": Property(),
+    }
+
+    def enable_tfa(self):
+        """
+        Enables TFA for the token's user. This requires a follow-up request
+        to confirm TFA. Returns the TFA secret that needs to be confirmed.
+
+        API Documentation: https://techdocs.akamai.com/linode-api/reference/post-tfa-enable
+
+        :returns: The TFA secret
+        :rtype: str
+        """
+        result = self._client.post("/profile/tfa-enable")
+
+        return result["secret"]
+
+    def confirm_tfa(self, code):
+        """
+        Confirms TFA for an account.
Needs a TFA code generated by enable_tfa + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-tfa-confirm + + :param code: The Two Factor code you generated with your Two Factor secret. + These codes are time-based, so be sure it is current. + :type code: str + + :returns: Returns true if operation was successful + :rtype: bool + """ + self._client.post( + "/profile/tfa-enable-confirm", data={"tfa_code": code} + ) + + return True + + def disable_tfa(self): + """ + Turns off TFA for this user's account. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-tfa-disable + + :returns: Returns true if operation was successful + :rtype: bool + """ + self._client.post("/profile/tfa-disable") + + return True + + @property + def grants(self): + """ + Returns grants for the current user + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-profile-grants + + :returns: The grants for the current user + :rtype: UserGrants + """ + from linode_api4.objects.account import ( # pylint: disable-all + UserGrants, + ) + + resp = self._client.get( + "/profile/grants" + ) # use special endpoint for restricted users + + grants = None + if resp is not None: + # if resp is None, we're unrestricted and do not have grants + grants = UserGrants(self._client, self.username, resp) + + return grants + + @property + def whitelist(self): + """ + Returns the user's whitelist entries, if whitelist is enabled + + DEPRECATED: Limited to customers with a feature tag + """ + return self._client._get_and_filter(WhitelistEntry) + + def add_whitelist_entry(self, address, netmask, note=None): + """ + Adds a new entry to this user's IP whitelist, if enabled + + DEPRECATED: Limited to customers with a feature tag + """ + result = self._client.post( + "{}/whitelist".format(Profile.api_endpoint), + data={ + "address": address, + "netmask": netmask, + "note": note, + }, + ) + + if not "id" in result: + raise UnexpectedResponseError( + 
"Unexpected response creating whitelist entry!" + ) + + return WhitelistEntry(result["id"], self._client, json=result) + + +class SSHKey(Base): + """ + An SSH Public Key uploaded to your profile for use in Linode Instance deployments. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ssh-key + """ + + api_endpoint = "/profile/sshkeys/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "ssh_key": Property(), + "created": Property(is_datetime=True), + } + + +class TrustedDevice(Base): + """ + A Trusted Device for a User. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-trusted-device + """ + + api_endpoint = "/profile/devices/{id}" + + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "expiry": Property(is_datetime=True), + "last_authenticated": Property(is_datetime=True), + "last_remote_addr": Property(), + "user_agent": Property(), + } + + +class ProfileLogin(Base): + """ + A Login object displaying information about a successful account login from this user. 
+
+    API Documentation: https://techdocs.akamai.com/linode-api/reference/get-profile-login
+    """
+
+    # Endpoint must be absolute ("/profile/...") like every other
+    # api_endpoint in this package; the relative form ("profile/...")
+    # produces a malformed request URL when joined with the base URL.
+    api_endpoint = "/profile/logins/{id}"
+
+    properties = {
+        "id": Property(identifier=True),
+        "datetime": Property(is_datetime=True),
+        "ip": Property(),
+        "restricted": Property(),
+        "status": Property(),
+        "username": Property(),
+    }
diff --git a/linode_api4/objects/region.py b/linode_api4/objects/region.py
new file mode 100644
index 000000000..9a77dc485
--- /dev/null
+++ b/linode_api4/objects/region.py
@@ -0,0 +1,177 @@
+from dataclasses import dataclass
+from typing import List, Optional
+
+from linode_api4.errors import UnexpectedResponseError
+from linode_api4.objects.base import Base, JSONObject, Property
+from linode_api4.objects.serializable import StrEnum
+
+
+class Capability(StrEnum):
+    """
+    An enum class that represents the capabilities that Linode offers
+    across different regions and services.
+
+    These capabilities indicate what services are available in each data center.
+    """
+
+    linodes = "Linodes"
+    nodebalancers = "NodeBalancers"
+    block_storage = "Block Storage"
+    object_storage = "Object Storage"
+    object_storage_regions = "Object Storage Access Key Regions"
+    object_storage_endpoint_types = "Object Storage Endpoint Types"
+    lke = "Kubernetes"
+    lke_ha_controlplanes = "LKE HA Control Planes"
+    lke_e = "Kubernetes Enterprise"
+    firewall = "Cloud Firewall"
+    gpu = "GPU Linodes"
+    vlans = "Vlans"
+    vpcs = "VPCs"
+    vpcs_extra = "VPCs Extra"
+    machine_images = "Machine Images"
+    dbaas = "Managed Databases"
+    dbaas_beta = "Managed Databases Beta"
+    bs_migrations = "Block Storage Migrations"
+    metadata = "Metadata"
+    premium_plans = "Premium Plans"
+    edge_plans = "Edge Plans"
+    distributed_plans = "Distributed Plans"
+    lke_control_plane_acl = "LKE Network Access Control List (IP ACL)"
+    aclb = "Akamai Cloud Load Balancer"
+    support_ticket_severity = "Support Ticket Severity"
+    backups = "Backups"
+    placement_group = "Placement Group"
+    disk_encryption
= "Disk Encryption" + la_disk_encryption = "LA Disk Encryption" + akamai_ram_protection = "Akamai RAM Protection" + blockstorage_encryption = "Block Storage Encryption" + blockstorage_perf_b1 = "Block Storage Performance B1" + blockstorage_perf_b1_default = "Block Storage Performance B1 Default" + aclp = "Akamai Cloud Pulse" + aclp_logs = "Akamai Cloud Pulse Logs" + aclp_logs_lkee = "Akamai Cloud Pulse Logs LKE-E Audit" + aclp_logs_dc_lkee = "ACLP Logs Datacenter LKE-E" + smtp_enabled = "SMTP Enabled" + stackscripts = "StackScripts" + vpu = "NETINT Quadra T1U" + linode_interfaces = "Linode Interfaces" + maintenance_policy = "Maintenance Policy" + vpc_dual_stack = "VPC Dual Stack" + vpc_ipv6_stack = "VPC IPv6 Stack" + nlb = "Network LoadBalancer" + natgateway = "NAT Gateway" + lke_e_byovpc = "Kubernetes Enterprise BYO VPC" + lke_e_stacktype = "Kubernetes Enterprise Dual Stack" + ruleset = "Cloud Firewall Rule Set" + prefixlists = "Cloud Firewall Prefix Lists" + current_prefixlists = "Cloud Firewall Prefix List Current References" + + +@dataclass +class RegionPlacementGroupLimits(JSONObject): + """ + Represents the Placement Group limits for the current account + in a specific region. + """ + + maximum_pgs_per_customer: int = 0 + maximum_linodes_per_pg: int = 0 + + +@dataclass +class RegionMonitors(JSONObject): + """ + Represents the monitor services available in a region. + Lists the services in this region that support metrics and alerts + use with Akamai Cloud Pulse (ACLP). + """ + + alerts: Optional[list[str]] = None + metrics: Optional[list[str]] = None + + +class Region(Base): + """ + A Region. Regions correspond to individual data centers, each located in a different geographical area. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-region + """ + + api_endpoint = "/regions/{id}" + properties = { + "id": Property(identifier=True), + "country": Property(), + "capabilities": Property(unordered=True), + "status": Property(), + "resolvers": Property(), + "label": Property(), + "site_type": Property(), + "placement_group_limits": Property( + json_object=RegionPlacementGroupLimits + ), + "monitors": Property(json_object=RegionMonitors), + } + + @property + def availability(self) -> List["RegionAvailabilityEntry"]: + result = self._client.get( + f"{self.api_endpoint}/availability", model=self + ) + + if result is None: + raise UnexpectedResponseError( + "Expected availability data, got None." + ) + + return [RegionAvailabilityEntry.from_json(v) for v in result] + + @property + def vpc_availability(self) -> "RegionVPCAvailability": + """ + Returns VPC availability data for this region. + + NOTE: IPv6 VPCs may not currently be available to all users. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-region-vpc-availability + + :returns: VPC availability data for this region. + :rtype: RegionVPCAvailability + """ + result = self._client.get( + f"{self.api_endpoint}/vpc-availability", model=self + ) + + if result is None: + raise UnexpectedResponseError( + "Expected VPC availability data, got None." + ) + + return RegionVPCAvailability.from_json(result) + + +@dataclass +class RegionAvailabilityEntry(JSONObject): + """ + Represents the availability of a Linode type within a region. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-region-availability + """ + + region: Optional[str] = None + plan: Optional[str] = None + available: bool = False + + +@dataclass +class RegionVPCAvailability(JSONObject): + """ + Represents the VPC availability data for a region. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-regions-vpc-availability + + NOTE: IPv6 VPCs may not currently be available to all users. + """ + + region: Optional[str] = None + available: bool = False + available_ipv6_prefix_lengths: Optional[List[int]] = None diff --git a/linode_api4/objects/serializable.py b/linode_api4/objects/serializable.py new file mode 100644 index 000000000..c1f59f6d4 --- /dev/null +++ b/linode_api4/objects/serializable.py @@ -0,0 +1,287 @@ +import inspect +from dataclasses import dataclass, fields +from enum import Enum +from types import SimpleNamespace +from typing import ( + Any, + ClassVar, + Dict, + List, + Optional, + Set, + Type, + Union, + get_args, + get_origin, + get_type_hints, +) + +from linode_api4.objects.filtering import FilterableAttribute + +# Wraps the SimpleNamespace class and allows for +# SQLAlchemy-style filter generation on JSONObjects. +JSONFilterGroup = SimpleNamespace + + +class JSONFilterableMetaclass(type): + def __init__(cls, name, bases, dct): + setattr( + cls, + "filters", + JSONFilterGroup( + **{ + k: FilterableAttribute(k) + for k in cls.__annotations__.keys() + } + ), + ) + + super().__init__(name, bases, dct) + + +@dataclass +class JSONObject(metaclass=JSONFilterableMetaclass): + """ + A simple helper class for serializable API objects. + This is typically used for nested object values. + + This class act similarly to MappedObject but with explicit + fields and static typing. + """ + + filters: ClassVar[JSONFilterGroup] = None + """ + A group containing FilterableAttributes used to create SQLAlchemy-style filters. + + Example usage:: + self.client.regions.availability( + RegionAvailabilityEntry.filters.plan == "premium4096.7" + ) + """ + + include_none_values: ClassVar[bool] = False + """ + If true, all None values for this class will be explicitly included in + the serialized output for instance of this class. 
+ """ + + always_include: ClassVar[Set[str]] = {} + """ + A set of keys corresponding to fields that should always be + included in the generated output regardless of whether their values + are None. + """ + + put_class: ClassVar[Optional[Type["JSONObject"]]] = None + """ + An alternative JSONObject class to use as the schema for PUT requests. + This prevents read-only fields from being included in PUT request bodies, + which in theory will result in validation errors from the API. + """ + + def __init__(self): + raise NotImplementedError( + "JSONObject is not intended to be constructed directly" + ) + + # TODO: Implement __repr__ + @staticmethod + def _unwrap_type(field_type: type) -> type: + args = get_args(field_type) + origin_type = get_origin(field_type) + + # We don't want to try to unwrap Dict, List, Set, etc. values + if origin_type is not Union: + return field_type + + if len(args) == 0: + raise TypeError("Expected type to have arguments, got none") + + # Use the first type in the Union's args + return JSONObject._unwrap_type(args[0]) + + @staticmethod + def _try_from_json(json_value: Any, field_type: type): + """ + Determines whether a JSON dict is an instance of a field type. + """ + + field_type = JSONObject._unwrap_type(field_type) + + if inspect.isclass(field_type) and issubclass(field_type, JSONObject): + return field_type.from_json(json_value) + + return json_value + + @classmethod + def _parse_attr_list(cls, json_value: Any, field_type: type): + """ + Attempts to parse a list attribute with a given value and field type. 
+ """ + + # Edge case for optional list values + if json_value is None: + return None + + type_hint_args = get_args(field_type) + + if len(type_hint_args) < 1: + return cls._try_from_json(json_value, field_type) + + return [ + cls._try_from_json(item, type_hint_args[0]) for item in json_value + ] + + @classmethod + def _parse_attr(cls, json_value: Any, field_type: type): + """ + Attempts to parse an attribute with a given value and field type. + """ + + field_type = JSONObject._unwrap_type(field_type) + + if list in (field_type, get_origin(field_type)): + return cls._parse_attr_list(json_value, field_type) + + return cls._try_from_json(json_value, field_type) + + @classmethod + def from_json(cls, json: Dict[str, Any]) -> Optional["JSONObject"]: + """ + Creates an instance of this class from a JSON dict, respecting json_key metadata. + """ + if json is None: + return None + + obj = cls() + + type_hints = get_type_hints(cls) + + for f in fields(cls): + json_key = f.metadata.get("json_key", f.name) + field_type = type_hints.get(f.name) + value = json.get(json_key) + parsed_value = cls._parse_attr(value, field_type) + setattr(obj, f.name, parsed_value) + + return obj + + def _serialize(self, is_put: bool = False) -> Dict[str, Any]: + """ + Serializes this object into a JSON dict. + """ + cls = type(self) + + if is_put and cls.put_class is not None: + cls = cls.put_class + + cls_field_keys = {field.name for field in fields(cls)} + + type_hints = get_type_hints(cls) + + def attempt_serialize(value: Any) -> Any: + """ + Attempts to serialize the given value, else returns the value unchanged. 
+ """ + if issubclass(type(value), JSONObject): + return value._serialize(is_put=is_put) + + # Needed to avoid circular imports without a breaking change + from linode_api4.objects.base import ( # pylint: disable=import-outside-toplevel + ExplicitNullValue, + ) + + if value == ExplicitNullValue or isinstance( + value, ExplicitNullValue + ): + return None + + return value + + def should_include(key: str, value: Any) -> bool: + """ + Returns whether the given key/value pair should be included in the resulting dict. + """ + + # During PUT operations, keys not present in the put_class should be excluded + if key not in cls_field_keys: + return False + + if cls.include_none_values or key in cls.always_include: + return True + + hint = type_hints.get(key) + + # We want to exclude any Optional values that are None + # NOTE: We need to check for Union here because Optional is an alias of Union. + if ( + hint is None + or get_origin(hint) is not Union + or type(None) not in get_args(hint) + ): + return True + + return value is not None + + result = {} + + for f in fields(self): + k = f.name + json_key = f.metadata.get("json_key", k) + v = getattr(self, k) + + if not should_include(k, v): + continue + + if isinstance(v, List): + v = [attempt_serialize(j) for j in v] + elif isinstance(v, Dict): + v = {k: attempt_serialize(j) for k, j in v.items()} + else: + v = attempt_serialize(v) + + result[json_key] = v + + return result + + @property + def dict(self) -> Dict[str, Any]: + """ + Alias for JSONObject._serialize() + """ + return self._serialize() + + # Various dict methods for backwards compat + def __getitem__(self, key) -> Any: + return getattr(self, key) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __iter__(self) -> Any: + return vars(self) + + def __delitem__(self, key): + setattr(self, key, None) + + def __len__(self): + return len(vars(self)) + + +class StrEnum(str, Enum): + """ + Used for enums that are of type string, which is necessary + 
for implicit JSON serialization. + + NOTE: Replace this with StrEnum once Python 3.10 has been EOL'd. + See: https://docs.python.org/3/library/enum.html#enum.StrEnum + """ + + def __new__(cls, *values): + value = str(*values) + member = str.__new__(cls, value) + member._value_ = value + return member + + def __str__(self): + return self._value_ diff --git a/linode_api4/objects/support.py b/linode_api4/objects/support.py new file mode 100644 index 000000000..548f58f16 --- /dev/null +++ b/linode_api4/objects/support.py @@ -0,0 +1,190 @@ +from pathlib import Path +from typing import Union + +import requests + +from linode_api4.errors import ApiError, UnexpectedResponseError +from linode_api4.objects import ( + Base, + DerivedBase, + Domain, + Instance, + Property, + Volume, +) +from linode_api4.objects.nodebalancer import NodeBalancer + + +class TicketReply(DerivedBase): + """ + A reply to a Support Ticket. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ticket-replies + """ + + api_endpoint = "/support/tickets/{ticket_id}/replies" + derived_url_path = "replies" + parent_id_name = "ticket_id" + + properties = { + "id": Property(identifier=True), + "ticket_id": Property(identifier=True), + "description": Property(), + "created": Property(is_datetime=True), + "created_by": Property(), + "from_linode": Property(), + } + + +class SupportTicket(Base): + """ + An objected representing a Linode Support Ticket. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-ticket-replies + """ + + api_endpoint = "/support/tickets/{id}" + properties = { + "id": Property(identifier=True), + "summary": Property(), + "description": Property(), + "status": Property(), + "entity": Property(), + "opened": Property(is_datetime=True), + "closed": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "updated_by": Property(), + "replies": Property(derived_class=TicketReply), + "attachments": Property(), + "closable": Property(), + "gravatar_id": Property(), + "opened_by": Property(), + } + + @property + def linode(self): + """ + If applicable, the Linode referenced in this ticket. + + :returns: The Linode referenced in this ticket. + :rtype: Optional[Instance] + """ + + if self.entity and self.entity.type == "linode": + return Instance(self._client, self.entity.id) + return None + + @property + def domain(self): + """ + If applicable, the Domain referenced in this ticket. + + :returns: The Domain referenced in this ticket. + :rtype: Optional[Domain] + """ + + if self.entity and self.entity.type == "domain": + return Domain(self._client, self.entity.id) + return None + + @property + def nodebalancer(self): + """ + If applicable, the NodeBalancer referenced in this ticket. + + :returns: The NodeBalancer referenced in this ticket. + :rtype: Optional[NodeBalancer] + """ + + if self.entity and self.entity.type == "nodebalancer": + return NodeBalancer(self._client, self.entity.id) + return None + + @property + def volume(self): + """ + If applicable, the Volume referenced in this ticket. + + :returns: The Volume referenced in this ticket. + :rtype: Optional[Volume] + """ + + if self.entity and self.entity.type == "volume": + return Volume(self._client, self.entity.id) + return None + + def post_reply(self, description): + """ + Adds a reply to an existing Support Ticket. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-ticket-reply + + :param description: The content of this Support Ticket Reply. + :type description: str + + :returns: The new TicketReply object. + :rtype: Optional[TicketReply] + """ + + result = self._client.post( + "{}/replies".format(SupportTicket.api_endpoint), + model=self, + data={ + "description": description, + }, + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when creating ticket reply!", json=result + ) + + r = TicketReply(self._client, result["id"], self.id, result) + return r + + def upload_attachment(self, attachment: Union[Path, str]): + """ + Uploads an attachment to an existing Support Ticket. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-ticket-attachment + + :param attachment: A path to the file to upload as an attachment. + :type attachment: str + + :returns: Whether the upload operation was successful. + :rtype: bool + """ + if not isinstance(attachment, Path): + attachment = Path(attachment) + + if not attachment.exists(): + raise ValueError("File not exist, nothing to upload.") + + headers = { + "Authorization": "Bearer {}".format(self._client.token), + } + + with open(attachment, "rb") as f: + result = requests.post( + "{}{}/attachments".format( + self._client.base_url, + SupportTicket.api_endpoint.format(id=self.id), + ), + headers=headers, + files={"file": f}, + ) + + api_exc = ApiError.from_response(result) + if api_exc is not None: + raise api_exc + + return True + + def support_ticket_close(self): + """ + Closes a Support Ticket. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-close-ticket + """ + + self._client.post("{}/close".format(self.api_endpoint), model=self) diff --git a/linode_api4/objects/tag.py b/linode_api4/objects/tag.py new file mode 100644 index 000000000..4f2e7b1cb --- /dev/null +++ b/linode_api4/objects/tag.py @@ -0,0 +1,132 @@ +from linode_api4.objects import ( + Base, + Domain, + Instance, + NodeBalancer, + Property, + Volume, +) +from linode_api4.paginated_list import PaginatedList + +CLASS_MAP = { + "linode": Instance, + "domain": Domain, + "nodebalancer": NodeBalancer, + "volume": Volume, +} + + +class Tag(Base): + """ + A User-defined labels attached to objects in your Account, such as Linodes. + Used for specifying and grouping attributes of objects that are relevant to the User. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-tags + """ + + api_endpoint = "/tags/{label}" + id_attribute = "label" + + properties = { + "label": Property(identifier=True), + } + + def _get_raw_objects(self): + """ + Helper function to populate the first page of raw objects for this tag. + This has the side effect of creating the ``_raw_objects`` attribute of + this object. + """ + if not hasattr(self, "_raw_objects"): + result = self._client.get(type(self).api_endpoint, model=self) + + # I want to cache this to avoid making duplicate requests, but I don't + # want it in the __init__ + self._raw_objects = result + + return self._raw_objects + + def _api_get(self): + """ + Override the default behavior and just return myself if I exist - this + is how the python library works, but calling a GET to this endpoint in + the API returns a collection of objects with this tag. See ``objects`` + below. + """ + # do this to allow appropriate 404ing if this tag doesn't exist + self._get_raw_objects() + + return self + + @property + def objects(self): + """ + Returns a list of objects with this Tag. 
This list may contain any + taggable object type. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-tagged-objects + + :returns: Objects with this Tag + :rtype: PaginatedList of objects with this Tag + """ + data = self._get_raw_objects() + + return PaginatedList.make_paginated_list( + data, + self._client, + TaggedObjectProxy, + page_url=type(self).api_endpoint.format(**vars(self)), + ) + + +class TaggedObjectProxy: + """ + This class accepts an object from a list of Tagged objects and returns + the correct type of object based on the response data. + + .. warning:: + + It is incorrect to instantiate this class. This class is a proxy for the + enveloped objects returned from the tagged objects collection, and should + only be used in that context. + """ + + id_attribute = ( + "type" # the envelope containing tagged objects has a `type` field + ) + # that defined what type of object is in the envelope. We'll + # use that as the ID for the proxy class so ``make_instance`` + # below can easily tell what type it should actually be + # making and returning. + + @classmethod + def make_instance(cls, id, client, parent_id=None, json=None): + """ + Overrides Base's ``make_instance`` to allow dynamic creation of objects + based on the defined type in the response json. + + :param cls: The class this was called on + :param id: The id of the instance to create + :param client: The client to use for this instance + :param parent_id: The parent id for derived classes + :param json: The JSON to populate the instance with + + :returns: A new instance of this type, populated with json + :rtype: TaggedObjectProxy + """ + make_cls = CLASS_MAP.get( + id + ) # in this case, ID is coming in as the type + + if make_cls is None: + # we don't recognize this entity type - do nothing? 
+ return None + + # discard the envelope + real_json = json["data"] + real_id = real_json["id"] + + # make the real object type + return Base.make( + real_id, client, make_cls, parent_id=None, json=real_json + ) diff --git a/linode_api4/objects/volume.py b/linode_api4/objects/volume.py new file mode 100644 index 000000000..cda9932ab --- /dev/null +++ b/linode_api4/objects/volume.py @@ -0,0 +1,147 @@ +from linode_api4.common import Price, RegionPrice +from linode_api4.errors import UnexpectedResponseError +from linode_api4.objects.base import ( + Base, + Property, + _flatten_request_body_recursive, +) +from linode_api4.objects.linode import Instance, Region +from linode_api4.objects.region import Region +from linode_api4.util import drop_null_keys + + +class VolumeType(Base): + """ + An VolumeType represents the structure of a valid Volume type. + Currently the VolumeType can only be retrieved by listing, i.e.: + types = client.volumes.types() + + API documentation: https://techdocs.akamai.com/linode-api/reference/get-volume-types + """ + + properties = { + "id": Property(identifier=True), + "label": Property(), + "price": Property(json_object=Price), + "region_prices": Property(json_object=RegionPrice), + "transfer": Property(), + } + + +class Volume(Base): + """ + A single Block Storage Volume. Block Storage Volumes are persistent storage devices + that can be attached to a Compute Instance and used to store any type of data. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-volume + """ + + api_endpoint = "/volumes/{id}" + + properties = { + "id": Property(identifier=True), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + "linode_id": Property(id_relationship=Instance), + "label": Property(mutable=True), + "size": Property(), + "status": Property(), + "region": Property(slug_relationship=Region), + "tags": Property(mutable=True, unordered=True), + "filesystem_path": Property(), + "hardware_type": Property(), + "linode_label": Property(), + "encryption": Property(), + } + + def attach(self, to_linode, config=None): + """ + Attaches this Volume to the given Linode. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-attach-volume + + :param to_linode: The ID or object of the Linode to attach the volume to. + :type to_linode: Union[Instance, int] + + :param config: The ID or object of the Linode Config to include this Volume in. + Must belong to the Linode referenced by linode_id. + If not given, the last booted Config will be chosen. 
+ :type config: Union[Config, int] + """ + + body = { + "linode_id": to_linode, + "config": config, + } + + result = self._client.post( + "{}/attach".format(Volume.api_endpoint), + model=self, + data=_flatten_request_body_recursive(drop_null_keys(body)), + ) + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response when attaching volume!", json=result + ) + + self._populate(result) + return True + + def detach(self): + """ + Detaches this Volume if it is attached + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-detach-volume + + :returns: Returns true if operation was successful + :rtype: bool + """ + self._client.post("{}/detach".format(Volume.api_endpoint), model=self) + + return True + + def resize(self, size): + """ + Resizes this Volume + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-resize-volume + + :param size: The Volumeโ€™s size, in GiB. + :type size: int + + :returns: Returns true if operation was successful + :rtype: bool + """ + result = self._client.post( + "{}/resize".format(Volume.api_endpoint), + model=self, + data={"size": size}, + ) + + self._populate(result) + + return True + + def clone(self, label): + """ + Clones this volume to a new volume in the same region with the given label + + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-clone-volume + + :param label: The label for the new volume. + :type label: str + + :returns: The new volume object. 
+ :rtype: Volume + """ + result = self._client.post( + "{}/clone".format(Volume.api_endpoint), + model=self, + data={"label": label}, + ) + + if not "id" in result: + raise UnexpectedResponseError("Unexpected response cloning volume!") + + return Volume(self._client, result["id"], result) diff --git a/linode_api4/objects/vpc.py b/linode_api4/objects/vpc.py new file mode 100644 index 000000000..4adecc2e3 --- /dev/null +++ b/linode_api4/objects/vpc.py @@ -0,0 +1,170 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Union + +from linode_api4.errors import UnexpectedResponseError +from linode_api4.objects import Base, DerivedBase, Property, Region +from linode_api4.objects.base import _flatten_request_body_recursive +from linode_api4.objects.networking import VPCIPAddress +from linode_api4.objects.serializable import JSONObject +from linode_api4.paginated_list import PaginatedList +from linode_api4.util import drop_null_keys + + +@dataclass +class VPCIPv6RangeOptions(JSONObject): + """ + VPCIPv6RangeOptions is used to specify an IPv6 range when creating or updating a VPC. + """ + + range: str = "" + allocation_class: Optional[str] = None + + +@dataclass +class VPCIPv6Range(JSONObject): + """ + VPCIPv6Range represents a single VPC IPv6 range. + """ + + put_class = VPCIPv6RangeOptions + + range: str = "" + + +@dataclass +class VPCSubnetIPv6RangeOptions(JSONObject): + """ + VPCSubnetIPv6RangeOptions is used to specify an IPv6 range when creating or updating a VPC subnet. + """ + + range: str = "" + + +@dataclass +class VPCSubnetIPv6Range(JSONObject): + """ + VPCSubnetIPv6Range represents a single VPC subnet IPv6 range. 
+ """ + + put_class = VPCSubnetIPv6RangeOptions + + range: str = "" + + +@dataclass +class VPCSubnetLinodeInterface(JSONObject): + id: int = 0 + config_id: Optional[int] = None + active: bool = False + + +@dataclass +class VPCSubnetLinode(JSONObject): + id: int = 0 + interfaces: Optional[List[VPCSubnetLinodeInterface]] = None + + +@dataclass +class VPCSubnetDatabase(JSONObject): + id: int = 0 + ipv4_range: Optional[str] = None + ipv6_ranges: Optional[List[str]] = None + + +class VPCSubnet(DerivedBase): + """ + An instance of a VPC subnet. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-vpc-subnet + """ + + api_endpoint = "/vpcs/{vpc_id}/subnets/{id}" + derived_url_path = "subnets" + parent_id_name = "vpc_id" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "ipv4": Property(), + "ipv6": Property(json_object=VPCSubnetIPv6Range, unordered=True), + "linodes": Property(json_object=VPCSubnetLinode, unordered=True), + "databases": Property(json_object=VPCSubnetDatabase, unordered=True), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + } + + +class VPC(Base): + """ + An instance of a VPC. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-vpc + """ + + api_endpoint = "/vpcs/{id}" + + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + "description": Property(mutable=True), + "region": Property(slug_relationship=Region), + "ipv6": Property(json_object=VPCIPv6Range, unordered=True), + "subnets": Property(derived_class=VPCSubnet), + "created": Property(is_datetime=True), + "updated": Property(is_datetime=True), + } + + def subnet_create( + self, + label: str, + ipv4: Optional[str] = None, + ipv6: Optional[ + List[Union[VPCSubnetIPv6RangeOptions, Dict[str, Any]]] + ] = None, + **kwargs, + ) -> VPCSubnet: + """ + Creates a new Subnet object under this VPC. 
+ + API Documentation: https://techdocs.akamai.com/linode-api/reference/post-vpc-subnet + + :param label: The label of this subnet. + :type label: str + :param ipv4: The IPv4 range of this subnet in CIDR format. + :type ipv4: str + :param ipv6: The IPv6 range of this subnet in CIDR format. + :type ipv6: List[Union[VPCSubnetIPv6RangeOptions, Dict[str, Any]]] + """ + params = {"label": label, "ipv4": ipv4, "ipv6": ipv6} + + params.update(kwargs) + + result = self._client.post( + "{}/subnets".format(VPC.api_endpoint), + model=self, + data=drop_null_keys(_flatten_request_body_recursive(params)), + ) + self.invalidate() + + if not "id" in result: + raise UnexpectedResponseError( + "Unexpected response creating Subnet", json=result + ) + + d = VPCSubnet(self._client, result["id"], self.id, result) + return d + + @property + def ips(self) -> PaginatedList: + """ + Get all the IP addresses under this VPC. + + API Documentation: https://techdocs.akamai.com/linode-api/reference/get-vpc-ips + + :returns: A list of VPCIPAddresses the acting user can access. + :rtype: PaginatedList of VPCIPAddress + """ + + return self._client._get_and_filter( + VPCIPAddress, endpoint="/vpcs/{}/ips".format(self.id) + ) diff --git a/linode_api4/paginated_list.py b/linode_api4/paginated_list.py new file mode 100644 index 000000000..b9421de6a --- /dev/null +++ b/linode_api4/paginated_list.py @@ -0,0 +1,262 @@ +import math + +from linode_api4.objects.serializable import JSONObject + + +class PaginatedList(object): + """ + The PaginatedList encapsulates the API V4's pagination in an easily + consumable way. A PaginatedList may be treated like a normal `list` in all + ways, and can be iterated over, indexed, and sliced. + + PaginatedLists should never be constructed manually, and instead should + be created by requesting a collection of resources from the :any:`LinodeClient`. 
+ For example:: + + linodes = client.linode.instances() # returns a PaginatedList of Linodes + + Once you have a PaginatedList of resources, it doesn't matter how many + resources the API will return - you can iterate over all of them without + having to worry about pagination.:: + + # iterate over all linodes. If there are two or more pages, + # they will be loaded as required. + for linode in linodes: + print(linode.label) + + You may access the number of items in a collection by calling `len` on the + PaginatedList:: + + num_linodes = len(linodes) + + This will _not_ emit another API request. + """ + + def __init__( + self, + client, + page_endpoint, + page=[], + max_pages=1, + total_items=None, + parent_id=None, + filters=None, + ): + self.client = client + self.page_endpoint = page_endpoint + self.query_filters = filters + self.page_size = len(page) + self.max_pages = max_pages + self.lists = [None for _ in range(0, self.max_pages)] + if self.lists: + self.lists[0] = page + self.list_cls = ( + type(page[0]) if page else None + ) # TODO if this is None that's bad + self.objects_parent_id = parent_id + self.cur = 0 # for being a generator + + self.total_items = total_items + if not total_items: + self.total_items = len(page) + + def first(self): + """ + A convenience method for getting only the first item in this list. + Exactly equivalent to getting index 0. + + :returns: The first item in this list. + """ + return self[0] + + def last(self): + """ + A convenience method for getting only the last item in this list. + Exactly equivalent to getting index -1. + + :returns: The first item in this list. + """ + return self[-1] + + def only(self): + """ + Returns the first item in this list, and asserts that it is the only + item. This is useful when querying a collection for a resource and + expecting to get only one back. 
For instance:: + + # raises if it finds more than one Linode + production_box = client.linode.instances(Linode.group == "prod").only() + + :returns: The first and only item in this list. + :raises ValueError: If more than one item is in this list. + """ + if len(self) == 1: + return self[0] + raise ValueError("List {} has more than one element!".format(self)) + + def __repr__(self): + return "PaginatedList ({} items)".format(self.total_items) + + def _load_page(self, page_number): + j = self.client.get( + "/{}?page={}&page_size={}".format( + self.page_endpoint, page_number + 1, self.page_size + ), + filters=self.query_filters, + ) + + if j["pages"] != self.max_pages or j["results"] != len(self): + raise RuntimeError( + "List {} has changed since creation!".format(self) + ) + + l = PaginatedList.make_list( + j["data"], + self.client, + self.list_cls, + parent_id=self.objects_parent_id, + ) + self.lists[page_number] = l + + def __getitem__(self, index): + # this comes in here now, but we're hadling it elsewhere + if isinstance(index, slice): + return self._get_slice(index) + + # handle negative indexing + if index < 0: + index = len(self) + index + if index < 0: + raise IndexError("list index out of range") + + if index >= self.page_size * self.max_pages: + raise IndexError("list index out of range") + normalized_index = index % self.page_size + target_page = math.ceil((index + 1.0) / self.page_size) - 1 + target_page = int(target_page) + + if not self.lists[target_page]: + self._load_page(target_page) + + return self.lists[target_page][normalized_index] + + def __len__(self): + return self.total_items + + def _get_slice(self, s): + # get range + i = s.start if s.start is not None else 0 + j = s.stop if s.stop is not None else self.total_items + + # we do not support steps outside of 1 yet + if s.step is not None and s.step != 1: + raise NotImplementedError( + "Only step sizes of 1 are currently supported." 
+ ) + + # if i or j are negative, normalize them + if i < 0: + i = self.total_items + i + + if j < 0: + j = self.total_items + j + + # if i or j are still negative, that's an IndexError + if i < 0 or j < 0: + raise IndexError("list index out of range") + + # if we're going nowhere or backward, return nothing + if j <= i: + return [] + + result = [] + + for c in range(i, j): + result.append(self[c]) + + return result + + def __setitem__(self, index, value): + raise AttributeError( + "Assigning to indicies in paginated lists is not supported" + ) + + def __delitem__(self, index): + raise AttributeError("Deleting from paginated lists is not supported") + + def __next__(self): + if self.cur < len(self): + self.cur += 1 + return self[self.cur - 1] + else: + raise StopIteration() + + @staticmethod + def make_list(json_arr, client, cls, parent_id=None): + """ + Returns a list of Populated objects of the given class type. This + should not be called outside of the :any:`LinodeClient` class. + + :param json_arr: The array of JSON data to make into a list + :param client: The LinodeClient to pass to new objects + :param parent_id: The parent id for derived objects + + :returns: A list of models from the JSON + """ + result = [] + + for obj in json_arr: + id_val = None + # Special handling for JSON objects + if issubclass(cls, JSONObject): + result.append(cls.from_json(obj)) + continue + + if "id" in obj: + id_val = obj["id"] + elif ( + hasattr(cls, "id_attribute") + and getattr(cls, "id_attribute") in obj + ): + id_val = obj[getattr(cls, "id_attribute")] + else: + continue + o = cls.make_instance(id_val, client, parent_id=parent_id, json=obj) + result.append(o) + + return result + + @staticmethod + def make_paginated_list( + json, client, cls, parent_id=None, page_url=None, filters=None + ): + """ + Returns a PaginatedList populated with the first page of data provided, + and the ability to load additional pages. 
This should not be called + outside of the :any:`LinodeClient` class. + + :param json: The JSON list to use as the first page + :param client: A LinodeClient to use to load additional pages + :param parent_id: The parent ID for derived objects + :param page_url: The URL to use when loading more pages + :param cls: The class to instantiate for objects + :param filters: The filters used when making the call that generated + this list. If not provided, this will fail when + loading additional pages. + + :returns: An instance of PaginatedList that will represent the entire + collection whose first page is json + """ + l = PaginatedList.make_list( + json["data"], client, cls, parent_id=parent_id + ) + p = PaginatedList( + client, + page_url, + page=l, + max_pages=json["pages"], + total_items=json["results"], + parent_id=parent_id, + filters=filters, + ) + return p diff --git a/linode_api4/polling.py b/linode_api4/polling.py new file mode 100644 index 000000000..7dc08d915 --- /dev/null +++ b/linode_api4/polling.py @@ -0,0 +1,250 @@ +import datetime +from typing import Any, Dict, List, Optional + +import polling + +from linode_api4.objects import Event + + +class EventError(Exception): + """ + Represents a failed Linode event. + """ + + def __init__(self, event_id: int, message: Optional[str]): + # Edge case, sometimes the message is populated with an empty string + if message is not None and len(message) < 1: + message = None + + self.event_id = event_id + self.message = message + + error_fmt = f"Event {event_id} failed" + if message is not None: + error_fmt += f": {message}" + + super().__init__(error_fmt) + + +class TimeoutContext: + """ + TimeoutContext should be used by polling resources to track their provisioning time. 
+ """ + + def __init__(self, timeout_seconds=120): + self._start_time = datetime.datetime.now() + self._timeout_seconds = timeout_seconds + + def start(self, start_time=datetime.datetime.now()): + """ + Sets the timeout start time to the current time. + + :param start_time: The moment when the context started. + :type start_time: datetime + """ + self._start_time = start_time + + def extend(self, seconds: int): + """ + Extends the timeout window. + + :param seconds: The number of seconds to extend the timeout period by. + :type seconds: int + """ + self._timeout_seconds += seconds + + @property + def expired(self): + """ + Whether the current timeout period has been exceeded. + + :returns: Whether this context is expired. + :rtype: bool + """ + return self.seconds_remaining < 0 + + @property + def valid(self): + """ + Whether the current timeout period has not been exceeded. + + :returns: Whether this context is valid. + :rtype: bool + """ + return not self.expired + + @property + def seconds_remaining(self): + """ + The number of seconds until the timeout period has expired. + + :returns: The number of seconds remaining in this context. + :rtype: int + """ + return self._timeout_seconds - self.seconds_since_started + + @property + def seconds_since_started(self): + """ + The number of seconds since the timeout period started. + + :returns: The number of seconds since the context started. 
+ :rtype: int + """ + return (datetime.datetime.now() - self._start_time).seconds + + +class EventPoller: + """ + EventPoller allows modules to dynamically poll for Linode events + """ + + def __init__( + self, + client: "LinodeClient", + entity_type: str, + action: str, + entity_id: Optional[int] = None, + ): + self._client = client + self._entity_type = entity_type + self._entity_id = entity_id + self._action = action + + # Initialize with an empty cache if no entity is specified + if self._entity_id is None: + self._previous_event_cache = {} + return + + # We only want the first page of this response + result = client.get("/account/events", filters=self._build_filter()) + + self._previous_event_cache = {v["id"]: v for v in result["data"]} + + def _build_filter(self) -> Dict[str, Any]: + """Generates a filter dict to use in HTTP requests""" + return { + "+order": "asc", + "+order_by": "created", + "entity.id": self._entity_id, + "entity.type": self._entity_type, + "action": self._action, + } + + def set_entity_id(self, entity_id: int) -> None: + """ + Sets the ID of the entity to filter on. + This is useful for create operations where + the entity id might not be known in __init__. + + :param entity_id: The ID of the entity to poll for. + :type entity_id: int + """ + self._entity_id = entity_id + + def _attempt_merge_event_into_cache(self, event: Dict[str, Any]): + """ + Attempts to merge the given event into the event cache. + """ + + if event["id"] in self._previous_event_cache: + return + + self._previous_event_cache[event["id"]] = event + + def _check_has_new_event( + self, events: List[Dict[str, Any]] + ) -> Optional[Dict[str, Any]]: + """ + If a new event is found in the given list, return it. 
+ """ + + for event in events: + # Ignore cached events + if event["id"] in self._previous_event_cache: + continue + + return event + + return None + + def wait_for_next_event( + self, timeout: int = 240, interval: int = 5 + ) -> Event: + """ + Waits for and returns the next event matching the + poller's configuration. + + :param timeout: The timeout in seconds before this polling operation will fail. + :type timeout: int + :param interval: The time in seconds to wait between polls. + :type interval: int + + :returns: The resulting event. + :rtype: Event + """ + result_event: Dict[str, Any] = {} + + def poll_func(): + new_event = self._check_has_new_event( + self._client.get( + "/account/events", filters=self._build_filter() + )["data"] + ) + + event_exists = new_event is not None + + if event_exists: + nonlocal result_event + result_event = new_event + self._attempt_merge_event_into_cache(new_event) + + return event_exists + + if poll_func(): + return Event(self._client, result_event["id"], json=result_event) + + polling.poll( + poll_func, + step=interval, + timeout=timeout, + ) + + return Event(self._client, result_event["id"], json=result_event) + + def wait_for_next_event_finished( + self, timeout: int = 240, interval: int = 5 + ) -> Event: + """ + Waits for the next event to enter status `finished` or `notification`. + + :param timeout: The timeout in seconds before this polling operation will fail. + :type timeout: int + :param interval: The time in seconds to wait between polls. + :type interval: int + + :returns: The resulting event. 
+ :rtype: Event + """ + + timeout_ctx = TimeoutContext(timeout_seconds=timeout) + event = self.wait_for_next_event(timeout_ctx.seconds_remaining) + + def poll_func(): + event._api_get() + + if event.status == "failed": + raise EventError(event.id, event.message) + + return event.status in ["finished", "notification"] + + if poll_func(): + return event + + polling.poll( + poll_func, + step=interval, + timeout=timeout_ctx.seconds_remaining, + ) + + return event diff --git a/linode_api4/util.py b/linode_api4/util.py new file mode 100644 index 000000000..f661367af --- /dev/null +++ b/linode_api4/util.py @@ -0,0 +1,55 @@ +""" +Contains various utility functions. +""" + +import string +from typing import Any, Dict + + +def drop_null_keys(data: Dict[Any, Any], recursive=True) -> Dict[Any, Any]: + """ + Traverses a dict and drops any keys that map to None values. + """ + + if not recursive: + return {k: v for k, v in data.items() if v is not None} + + def recursive_helper(value: Any) -> Any: + if isinstance(value, dict): + return { + k: recursive_helper(v) + for k, v in value.items() + if v is not None + } + + if isinstance(value, list): + return [recursive_helper(v) for v in value] + + return value + + return recursive_helper(data) + + +def generate_device_suffixes(n: int) -> list[str]: + """ + Generate n alphabetical suffixes starting with a, b, c, etc. + After z, continue with aa, ab, ac, etc. followed by aaa, aab, etc. + Example: + generate_device_suffixes(30) -> + ['a', 'b', 'c', ..., 'z', 'aa', 'ab', 'ac', 'ad'] + """ + letters = string.ascii_lowercase + result = [] + i = 0 + + while len(result) < n: + s = "" + x = i + while True: + s = letters[x % 26] + s + x = x // 26 - 1 + if x < 0: + break + result.append(s) + i += 1 + return result diff --git a/linode_api4/version.py b/linode_api4/version.py new file mode 100644 index 000000000..04065ecda --- /dev/null +++ b/linode_api4/version.py @@ -0,0 +1,5 @@ +""" +The version of this linode_api4 package. 
+""" + +__version__ = "0.0.0.dev" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..4d8542cfa --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,96 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + + +[project] +name = "linode_api4" +authors = [{ name = "Linode", email = "devs@linode.com" }] +description = "The official Python SDK for Linode API v4" +readme = "README.rst" +requires-python = ">=3.9" +keywords = [ + "akamai", + "Akamai Connected Cloud", + "linode", + "cloud", + "SDK", + "Linode APIv4", +] +license = { text = "BSD-3-Clause" } +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Topic :: Software Development :: Libraries", + "License :: OSI Approved :: BSD License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", +] +dependencies = ["requests", "polling", "deprecated"] +dynamic = ["version"] + +[project.optional-dependencies] +test = ["tox>=4.4.0"] + +dev = [ + "tox>=4.4.0", + "mock>=5.0.0", + "pytest>=7.3.1", + "httpretty>=1.1.4", + "black>=23.1.0", + "isort>=5.12.0", + "autoflake>=2.0.1", + "pylint", + "twine>=4.0.2", + "build>=0.10.0", + "Sphinx>=6.0.0", + "sphinx-autobuild>=2021.3.14", + "sphinxcontrib-fulltoc>=1.2.0", + "build>=0.10.0", + "twine>=4.0.2", + "pytest-rerunfailures", +] + +doc = [ + "Sphinx>=6.0.0", + "sphinx-autobuild>=2021.3.14", + "sphinxcontrib-fulltoc>=1.2.0", +] + +[project.urls] +Homepage = "https://github.com/linode/linode_api4-python" +Documentation = "https://linode-api4.readthedocs.io/" +Repository = "https://github.com/linode/linode_api4-python.git" + +[tool.setuptools.dynamic] +version = { attr = "linode_api4.version.__version__" } + +[tool.setuptools.packages.find] +exclude = ['contrib', 
'docs', 'build', 'build.*', 'test', 'test.*'] + +[tool.isort] +profile = "black" +line_length = 80 + +[tool.black] +line-length = 80 +target-version = ["py38", "py39", "py310", "py311", "py312"] + +[tool.autoflake] +expand-star-imports = true +ignore-init-module-imports = true +ignore-pass-after-docstring = true +in-place = true +recursive = true +remove-all-unused-imports = true +remove-duplicate-keys = false + +[tool.pytest.ini_options] +markers = [ + "smoke: mark a test as a smoke test", + "flaky: mark a test as a flaky test for rerun" +] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 00afeec47..000000000 --- a/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -httplib2 -enum34 diff --git a/scripts/lke-policy.yaml b/scripts/lke-policy.yaml new file mode 100644 index 000000000..9859ca8b4 --- /dev/null +++ b/scripts/lke-policy.yaml @@ -0,0 +1,78 @@ +apiVersion: projectcalico.org/v3 +kind: GlobalNetworkPolicy +metadata: + name: lke-rules +spec: + preDNAT: true + applyOnForward: true + order: 100 + # Remember to run calicoctl patch command for this to work + selector: "" + ingress: + # Allow ICMP + - action: Allow + protocol: ICMP + - action: Allow + protocol: ICMPv6 + + # Allow LKE-required ports + - action: Allow + protocol: TCP + destination: + nets: + - 192.168.128.0/17 + - 10.0.0.0/8 + ports: + - 10250 + - 10256 + - 179 + - action: Allow + protocol: UDP + destination: + nets: + - 192.168.128.0/17 + - 10.2.0.0/16 + ports: + - 51820 + + # Allow NodeBalancer ingress to the Node Ports & Allow DNS + - action: Allow + protocol: TCP + source: + nets: + - 192.168.255.0/24 + - 10.0.0.0/8 + destination: + ports: + - 53 + - 30000:32767 + - action: Allow + protocol: UDP + source: + nets: + - 192.168.255.0/24 + - 10.0.0.0/8 + destination: + ports: + - 53 + - 30000:32767 + + # Allow cluster internal communication + - action: Allow + destination: + nets: + - 10.0.0.0/8 + - action: Allow + source: + nets: + - 10.0.0.0/8 
+ + # 127.0.0.1/32 is needed for kubectl exec and node-shell + - action: Allow + destination: + nets: + - 127.0.0.1/32 + + # Block everything else + - action: Deny + - action: Log diff --git a/scripts/lke_calico_rules_e2e.sh b/scripts/lke_calico_rules_e2e.sh new file mode 100755 index 000000000..48ad5caec --- /dev/null +++ b/scripts/lke_calico_rules_e2e.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +RETRIES=3 +DELAY=30 + +# Function to retry a command with exponential backoff +retry_command() { + local retries=$1 + local wait_time=60 + shift + until "$@"; do + if ((retries == 0)); then + echo "Command failed after multiple retries. Exiting." + exit 1 + fi + echo "Command failed. Retrying in $wait_time seconds..." + sleep $wait_time + ((retries--)) + wait_time=$((wait_time * 2)) + done +} + +# Fetch the list of LKE cluster IDs +CLUSTER_IDS=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/lke/clusters" | jq -r '.data[].id') + +# Check if CLUSTER_IDS is empty +if [ -z "$CLUSTER_IDS" ]; then + echo "All clusters have been cleaned and properly destroyed. No need to apply inbound or outbound rules" + exit 0 +fi + +for ID in $CLUSTER_IDS; do + echo "Applying Calico rules to nodes in Cluster ID: $ID" + + # Download cluster configuration file with retry + for ((i=1; i<=RETRIES; i++)); do + config_response=$(curl -sH "Authorization: Bearer $LINODE_TOKEN" "https://api.linode.com/v4/lke/clusters/$ID/kubeconfig") + if [[ $config_response != *"kubeconfig is not yet available"* ]]; then + echo $config_response | jq -r '.[] | @base64d' > "/tmp/${ID}_config.yaml" + break + fi + echo "Attempt $i to download kubeconfig for cluster $ID failed. Retrying in $DELAY seconds..." + sleep $DELAY + done + + if [[ $config_response == *"kubeconfig is not yet available"* ]]; then + echo "kubeconfig for cluster id:$ID not available after $RETRIES attempts, mostly likely it is an empty cluster. Skipping..." 
+ else + # Export downloaded config file + export KUBECONFIG="/tmp/${ID}_config.yaml" + + retry_command $RETRIES kubectl get nodes + + retry_command $RETRIES calicoctl patch kubecontrollersconfiguration default --allow-version-mismatch --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}' + + retry_command $RETRIES calicoctl apply --allow-version-mismatch -f "$(pwd)/lke-policy.yaml" + fi +done diff --git a/setup.py b/setup.py index 09adebd4c..606849326 100755 --- a/setup.py +++ b/setup.py @@ -1,83 +1,3 @@ -#!/usr/bin/env python3 -""" -A setuptools based setup module +from setuptools import setup -Based on a template here: -https://github.com/pypa/sampleproject/blob/master/setup.py -""" - -# Always prefer setuptools over distutils -from setuptools import setup, find_packages -# To use a consistent encoding -from codecs import open -from os import path - -here = path.abspath(path.dirname(__file__)) - -# Get the long description from the README file -with open(path.join(here, 'README.rst'), encoding='utf-8') as f: - long_description = f.read() - -setup( - name='linode-api', - - # Versions should comply with PEP440. For a discussion on single-sourcing - # the version across setup.py and the project code, see - # https://packaging.python.org/en/latest/single_source_version.html - version='4.1.0b', - - description='The official python SDK for Linode API v4', - long_description=long_description, - - # The project's main homepage. - url='https://github.com/linode/python-linode-api', - - # Author details - author='Linode', - author_email='developers@linode.com', - - # Choose your license - license='BSD 3-Clause License', - - # See https://pypi.python.org/pypi?%3Aaction=list_classifiers - classifiers=[ - # How mature is this project? 
Common values are - # 3 - Alpha - # 4 - Beta - # 5 - Production/Stable - # This is staying in sync with the api's status - 'Development Status :: 4 - Beta', - - # Indicate who your project is intended for - 'Intended Audience :: Developers', - 'Topic :: Software Development :: Libraries', - - # Pick your license as you wish (should match "license" above) - 'License :: OSI Approved :: BSD License', - - # Specify the Python versions you support here. In particular, ensure - # that you indicate whether you support Python 2, Python 3 or both. - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - ], - - # What does your project relate to? - keywords='linode cloud hosting infrastructure', - - # You can just specify the packages manually here if your project is - # simple. Or you can use find_packages(). 
- packages=find_packages(exclude=['contrib', 'docs', 'tests']), - - # What do we need for this to run - install_requires=[ - "future", - "requests", - "enum34", - ] -) +setup() diff --git a/linode/functions.py b/test/__init__.py similarity index 100% rename from linode/functions.py rename to test/__init__.py diff --git a/test/fixtures/account.json b/test/fixtures/account.json new file mode 100644 index 000000000..001d7adad --- /dev/null +++ b/test/fixtures/account.json @@ -0,0 +1,38 @@ +{ + "state": "PA", + "city": "Philadelphia", + "phone": "123-456-7890", + "tax_id": "", + "balance": 0, + "company": "Linode", + "address_2": "", + "email": "support@linode.com", + "address_1": "3rd & Arch St", + "zip": "19106", + "first_name": "Test", + "last_name": "Guy", + "country": "US", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage", + "Linode Interfaces" + ], + "active_promotions": [ + { + "credit_monthly_cap": "10.00", + "credit_remaining": "50.00", + "description": "Receive up to $10 off your services every month for 6 months! 
Unused credits will expire once this promotion period ends.", + "expire_dt": "2018-01-31T23:59:59", + "image_url": "https://linode.com/10_a_month_promotion.svg", + "service_type": "all", + "summary": "$10 off your Linode a month!", + "this_month_credit_remaining": "10.00" + } + ], + "active_since": "2018-01-01T00:01:01", + "balance_uninvoiced": 145, + "billing_source": "akamai", + "euuid": "E1AF5EEC-526F-487D-B317EBEB34C87D71" +} diff --git a/test/fixtures/account_availability.json b/test/fixtures/account_availability.json new file mode 100644 index 000000000..f308cb975 --- /dev/null +++ b/test/fixtures/account_availability.json @@ -0,0 +1,62 @@ +{ + "data": [ + { + "region": "ap-west", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "ca-central", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "ap-southeast", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "us-central", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "us-west", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "us-southeast", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "us-east", + "unavailable": [], + "available": ["Linodes", "Kubernetes"] + }, + { + "region": "eu-west", + "unavailable": [], + "available": ["Linodes", "Cloud Firewall"] + }, + { + "region": "ap-south", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "eu-central", + "unavailable": [], + "available": ["Linodes", "NodeBalancers"] + }, + { + "region": "ap-northeast", + "unavailable": [], + "available": ["Linodes"] + } + ], + "page": 1, + "pages": 1, + "results": 11 +} diff --git a/test/fixtures/account_availability_us-east.json b/test/fixtures/account_availability_us-east.json new file mode 100644 index 000000000..765aeba6e --- /dev/null +++ 
b/test/fixtures/account_availability_us-east.json @@ -0,0 +1,5 @@ +{ + "region": "us-east", + "unavailable": [], + "available": ["Linodes", "Kubernetes"] +} \ No newline at end of file diff --git a/test/fixtures/account_betas.json b/test/fixtures/account_betas.json new file mode 100644 index 000000000..0ebb3858e --- /dev/null +++ b/test/fixtures/account_betas.json @@ -0,0 +1,15 @@ +{ + "data": [ + { + "id": "cool", + "label": "\r\n\r\nRepellat consequatur sunt qui.", + "enrolled": "2018-01-02T03:04:05", + "description": "Repellat consequatur sunt qui. Fugit eligendi ipsa et assumenda ea aspernatur esse. A itaque iste distinctio qui voluptas eum enim ipsa.", + "started": "2018-01-02T03:04:05", + "ended": "2018-01-02T03:04:05" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/account_betas_cool.json b/test/fixtures/account_betas_cool.json new file mode 100644 index 000000000..39f0310b3 --- /dev/null +++ b/test/fixtures/account_betas_cool.json @@ -0,0 +1,8 @@ +{ + "id": "cool", + "label": "\r\n\r\nRepellat consequatur sunt qui.", + "enrolled": "2018-01-02T03:04:05", + "description": "Repellat consequatur sunt qui. Fugit eligendi ipsa et assumenda ea aspernatur esse. 
A itaque iste distinctio qui voluptas eum enim ipsa.", + "started": "2018-01-02T03:04:05", + "ended": "2018-01-02T03:04:05" +} \ No newline at end of file diff --git a/test/fixtures/account_child-accounts.json b/test/fixtures/account_child-accounts.json new file mode 100644 index 000000000..e7e9aca43 --- /dev/null +++ b/test/fixtures/account_child-accounts.json @@ -0,0 +1,36 @@ +{ + "data": [ + { + "active_since": "2018-01-01T00:01:01", + "address_1": "123 Main Street", + "address_2": "Suite A", + "balance": 200, + "balance_uninvoiced": 145, + "billing_source": "external", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage" + ], + "city": "Philadelphia", + "company": "Linode LLC", + "country": "US", + "credit_card": { + "expiry": "11/2022", + "last_four": 1111 + }, + "email": "john.smith@linode.com", + "euuid": "E1AF5EEC-526F-487D-B317EBEB34C87D71", + "first_name": "John", + "last_name": "Smith", + "phone": "215-555-1212", + "state": "PA", + "tax_id": "ATU99999999", + "zip": "19102-1234" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/account_child-accounts_123456.json b/test/fixtures/account_child-accounts_123456.json new file mode 100644 index 000000000..8ce264693 --- /dev/null +++ b/test/fixtures/account_child-accounts_123456.json @@ -0,0 +1,29 @@ +{ + "active_since": "2018-01-01T00:01:01", + "address_1": "123 Main Street", + "address_2": "Suite A", + "balance": 200, + "balance_uninvoiced": 145, + "billing_source": "external", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage" + ], + "city": "Philadelphia", + "company": "Linode LLC", + "country": "US", + "credit_card": { + "expiry": "11/2022", + "last_four": 1111 + }, + "email": "john.smith@linode.com", + "euuid": "E1AF5EEC-526F-487D-B317EBEB34C87D71", + "first_name": "John", + "last_name": "Smith", + "phone": "215-555-1212", + "state": "PA", + "tax_id": "ATU99999999", + "zip": "19102-1234" +} \ No 
newline at end of file diff --git a/test/fixtures/account_child-accounts_123456_token.json b/test/fixtures/account_child-accounts_123456_token.json new file mode 100644 index 000000000..44afea72b --- /dev/null +++ b/test/fixtures/account_child-accounts_123456_token.json @@ -0,0 +1,8 @@ +{ + "created": "2024-01-01T00:01:01", + "expiry": "2024-01-01T13:46:32", + "id": 123, + "label": "cool_customer_proxy", + "scopes": "*", + "token": "abcdefghijklmnop" +} \ No newline at end of file diff --git a/test/fixtures/account_events_123.json b/test/fixtures/account_events_123.json new file mode 100644 index 000000000..b24156f90 --- /dev/null +++ b/test/fixtures/account_events_123.json @@ -0,0 +1,31 @@ +{ + "action": "ticket_create", + "created": "2025-03-25T12:00:00", + "duration": 300.56, + "entity": { + "id": 11111, + "label": "Problem booting my Linode", + "type": "ticket", + "url": "/v4/support/tickets/11111" + }, + "id": 123, + "message": "Ticket created for user issue.", + "percent_complete": null, + "rate": null, + "read": true, + "secondary_entity": { + "id": "linode/debian9", + "label": "linode1234", + "type": "linode", + "url": "/v4/linode/instances/1234" + }, + "seen": true, + "status": "completed", + "username": "exampleUser", + "maintenance_policy_set": "Tentative", + "description": "Scheduled maintenance", + "source": "user", + "not_before": "2025-03-25T12:00:00", + "start_time": "2025-03-25T12:30:00", + "complete_time": "2025-03-25T13:00:00" +} \ No newline at end of file diff --git a/test/fixtures/account_invoices.json b/test/fixtures/account_invoices.json new file mode 100644 index 000000000..980ecde9c --- /dev/null +++ b/test/fixtures/account_invoices.json @@ -0,0 +1,13 @@ +{ + "data": [ + { + "id": 123456, + "date": "2015-01-01T05:01:02", + "label": "Invoice #123456", + "total": 9.51 + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/account_invoices_123.json b/test/fixtures/account_invoices_123.json new file mode 100644 index 
000000000..e20fe4de6 --- /dev/null +++ b/test/fixtures/account_invoices_123.json @@ -0,0 +1,14 @@ +{ + "date": "2018-01-01T00:01:01", + "id": 123, + "label": "Invoice", + "subtotal": 120.25, + "tax": 12.25, + "tax_summary": [ + { + "name": "PA STATE TAX", + "tax": 12.25 + } + ], + "total": 132.5 +} diff --git a/test/fixtures/account_invoices_123456_items.json b/test/fixtures/account_invoices_123456_items.json new file mode 100644 index 000000000..289b131a4 --- /dev/null +++ b/test/fixtures/account_invoices_123456_items.json @@ -0,0 +1,16 @@ +{ + "data": [ + { + "from": "2014-12-19T00:27:02", + "label": "Linode 2048 - Example", + "type": "hourly", + "amount": 9.51, + "to": "2015-01-01T04:59:59", + "quantity": 317, + "unit_price": "0.03" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/account_logins.json b/test/fixtures/account_logins.json new file mode 100644 index 000000000..9c54581b1 --- /dev/null +++ b/test/fixtures/account_logins.json @@ -0,0 +1,15 @@ +{ + "data": [ + { + "datetime": "2018-01-01T00:01:01", + "id": 1234, + "ip": "192.0.2.0", + "restricted": true, + "status": "successful", + "username": "test-user" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/account_logins_123.json b/test/fixtures/account_logins_123.json new file mode 100644 index 000000000..3ec95d1ba --- /dev/null +++ b/test/fixtures/account_logins_123.json @@ -0,0 +1,8 @@ +{ + "datetime": "2018-01-01T00:01:01", + "id": 123, + "ip": "192.0.2.0", + "restricted": true, + "status": "successful", + "username": "test-user" +} \ No newline at end of file diff --git a/test/fixtures/account_maintenance.json b/test/fixtures/account_maintenance.json new file mode 100644 index 000000000..30f8ed19e --- /dev/null +++ b/test/fixtures/account_maintenance.json @@ -0,0 +1,41 @@ +{ + "pages": 1, + "page": 1, + "results": 2, + "data": [ + { + "entity": { + "id": 1234, + "label": "Linode #1234", + "type": "linode", + "url": "/linodes/1234" + }, + 
"reason": "Scheduled upgrade to faster NVMe hardware.", + "type": "linode_migrate", + "maintenance_policy_set": "linode/power_off_on", + "description": "Scheduled Maintenance", + "source": "platform", + "not_before": "2025-03-25T10:00:00Z", + "start_time": "2025-03-25T12:00:00Z", + "complete_time": "2025-03-25T14:00:00Z", + "status": "scheduled" + }, + { + "entity": { + "id": 1234, + "label": "Linode #1234", + "type": "linode", + "url": "/linodes/1234" + }, + "reason": "Pending migration of Linode #1234 to a new host.", + "type": "linode_migrate", + "maintenance_policy_set": "linode/migrate", + "description": "Emergency Maintenance", + "source": "user", + "not_before": "2025-03-26T15:00:00Z", + "start_time": "2025-03-26T15:00:00Z", + "complete_time": "2025-03-26T17:00:00Z", + "status": "in-progress" + } + ] +} diff --git a/test/fixtures/account_notifications.json b/test/fixtures/account_notifications.json new file mode 100644 index 000000000..7e6355221 --- /dev/null +++ b/test/fixtures/account_notifications.json @@ -0,0 +1,22 @@ +{ + "data": [ + { + "body": null, + "entity": { + "id": 3456, + "label": "Linode not booting.", + "type": "ticket", + "url": "/support/tickets/3456" + }, + "label": "You have an important ticket open!", + "message": "You have an important ticket open!", + "severity": "major", + "type": "ticket_important", + "until": null, + "when": null + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/account_oauth-clients_2737bf16b39ab5d7b4a1.json b/test/fixtures/account_oauth-clients_2737bf16b39ab5d7b4a1.json new file mode 100644 index 000000000..1520c8114 --- /dev/null +++ b/test/fixtures/account_oauth-clients_2737bf16b39ab5d7b4a1.json @@ -0,0 +1,9 @@ +{ + "id": "2737bf16b39ab5d7b4a1", + "label": "Test_Client_1", + "public": false, + "redirect_uri": "https://example.org/oauth/callback", + "secret": "", + "status": "active", + "thumbnail_url": "https://api.linode.com/v4/account/clients/2737bf16b39ab5d7b4a1/thumbnail" +} \ 
No newline at end of file diff --git a/test/fixtures/account_payment-method_123.json b/test/fixtures/account_payment-method_123.json new file mode 100644 index 000000000..611e49713 --- /dev/null +++ b/test/fixtures/account_payment-method_123.json @@ -0,0 +1,12 @@ +{ + "created": "2018-01-15T00:01:01", + "data": { + "card_type": "Discover", + "expiry": "06/2022", + "last_four": "1234" + }, + "id": 123, + "is_default": true, + "type": "credit_card" + } + \ No newline at end of file diff --git a/test/fixtures/account_payment-methods.json b/test/fixtures/account_payment-methods.json new file mode 100644 index 000000000..2619af248 --- /dev/null +++ b/test/fixtures/account_payment-methods.json @@ -0,0 +1,18 @@ +{ + "data": [ + { + "created": "2018-01-15T00:01:01", + "data": { + "card_type": "Discover", + "expiry": "06/2022", + "last_four": "1234" + }, + "id": 123, + "is_default": true, + "type": "credit_card" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/account_payments.json b/test/fixtures/account_payments.json new file mode 100644 index 000000000..f218ae1bc --- /dev/null +++ b/test/fixtures/account_payments.json @@ -0,0 +1,13 @@ +{ + "data": [ + { + "id": 123456, + "date": "2015-01-01T05:01:02", + "usd": 1000 + } + ], + "page": 1, + "pages": 1, + "results": 1 + } + \ No newline at end of file diff --git a/test/fixtures/account_promo-codes.json b/test/fixtures/account_promo-codes.json new file mode 100644 index 000000000..838762934 --- /dev/null +++ b/test/fixtures/account_promo-codes.json @@ -0,0 +1,10 @@ +{ + "credit_monthly_cap": "10.00", + "credit_remaining": "50.00", + "description": "Receive up to $10 off your services every month for 6 months! 
Unused credits will expire once this promotion period ends.", + "expire_dt": "2018-01-31T23:59:59", + "image_url": "https://linode.com/10_a_month_promotion.svg", + "service_type": "all", + "summary": "$10 off your Linode a month!", + "this_month_credit_remaining": "10.00" + } \ No newline at end of file diff --git a/test/fixtures/account_service-transfers.json b/test/fixtures/account_service-transfers.json new file mode 100644 index 000000000..cbf4a0c60 --- /dev/null +++ b/test/fixtures/account_service-transfers.json @@ -0,0 +1,21 @@ +{ + "data": [ + { + "created": "2021-02-11T16:37:03", + "entities": { + "linodes": [ + 111, + 222 + ] + }, + "expiry": "2021-02-12T16:37:03", + "is_sender": true, + "status": "pending", + "token": "123E4567-E89B-12D3-A456-426614174000", + "updated": "2021-02-11T16:37:03" + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/account_service-transfers_12345.json b/test/fixtures/account_service-transfers_12345.json new file mode 100644 index 000000000..819506524 --- /dev/null +++ b/test/fixtures/account_service-transfers_12345.json @@ -0,0 +1,14 @@ +{ + "created": "2021-02-11T16:37:03", + "entities": { + "linodes": [ + 111, + 222 + ] + }, + "expiry": "2021-02-12T16:37:03", + "is_sender": true, + "status": "pending", + "token": "12345", + "updated": "2021-02-11T16:37:03" + } \ No newline at end of file diff --git a/test/fixtures/account_settings.json b/test/fixtures/account_settings.json new file mode 100644 index 000000000..963c37306 --- /dev/null +++ b/test/fixtures/account_settings.json @@ -0,0 +1,9 @@ +{ + "longview_subscription": "longview-100", + "managed": false, + "network_helper": false, + "object_storage": "active", + "backups_enabled": true, + "interfaces_for_new_linodes": "linode_default_but_legacy_config_allowed", + "maintenance_policy": "linode/migrate" +} diff --git a/test/fixtures/account_transfer.json b/test/fixtures/account_transfer.json new file mode 100644 index 
000000000..ce4658a6a --- /dev/null +++ b/test/fixtures/account_transfer.json @@ -0,0 +1,14 @@ +{ + "quota": 471, + "used": 737373, + "billable": 0, + + "region_transfers": [ + { + "id": "ap-west", + "used": 1, + "quota": 5010, + "billable": 0 + } + ] +} \ No newline at end of file diff --git a/test/fixtures/account_users_test-user.json b/test/fixtures/account_users_test-user.json new file mode 100644 index 000000000..66e5f9b12 --- /dev/null +++ b/test/fixtures/account_users_test-user.json @@ -0,0 +1,10 @@ +{ + "email": "test-user@linode.com", + "restricted": true, + "ssh_keys": [ + "home-pc", + "laptop" + ], + "tfa_enabled": true, + "username": "test-user" + } \ No newline at end of file diff --git a/test/fixtures/betas.json b/test/fixtures/betas.json new file mode 100644 index 000000000..8af261307 --- /dev/null +++ b/test/fixtures/betas.json @@ -0,0 +1,24 @@ +{ + "data": [ + { + "id": "active_closed", + "label": "active closed beta", + "description": "An active closed beta", + "started": "2023-07-19T15:23:43", + "ended": null, + "greenlight_only": true, + "more_info": "a link with even more info" + }, + { + "id": "limited", + "label": "limited beta", + "description": "An active limited beta", + "started": "2023-07-19T15:23:43", + "ended": null, "greenlight_only": false, + "more_info": "a link with even more info" + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/betas_active.json b/test/fixtures/betas_active.json new file mode 100644 index 000000000..ce9db7c14 --- /dev/null +++ b/test/fixtures/betas_active.json @@ -0,0 +1,9 @@ +{ + "id": "active", + "label": "active closed beta", + "description": "An active closed beta", + "started": "2018-01-02T03:04:05", + "ended": null, + "greenlight_only": true, + "more_info": "a link with even more info" +} \ No newline at end of file diff --git a/test/fixtures/databases_engines.json b/test/fixtures/databases_engines.json new file mode 100644 index 
000000000..6418f93ab --- /dev/null +++ b/test/fixtures/databases_engines.json @@ -0,0 +1,17 @@ +{ + "data": [ + { + "engine": "mysql", + "id": "mysql/8.0.26", + "version": "8.0.26" + }, + { + "engine": "postgresql", + "id": "postgresql/10.14", + "version": "10.14" + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/databases_instances.json b/test/fixtures/databases_instances.json new file mode 100644 index 000000000..d2e6f0cf9 --- /dev/null +++ b/test/fixtures/databases_instances.json @@ -0,0 +1,41 @@ +{ + "data": [ + { + "allow_list": [ + "203.0.113.1/32", + "192.0.1.0/24" + ], + "cluster_size": 3, + "created": "2022-01-01T00:01:01", + "encrypted": false, + "engine": "mysql", + "hosts": { + "primary": "lin-123-456-mysql-mysql-primary.servers.linodedb.net", + "standby": "lin-123-456-mysql-primary-private.servers.linodedb.net" + }, + "id": 123, + "instance_uri": "/v4/databases/mysql/instances/123", + "label": "example-db", + "region": "us-east", + "status": "active", + "type": "g6-dedicated-2", + "updated": "2022-01-01T00:01:01", + "updates": { + "day_of_week": 1, + "duration": 3, + "frequency": "weekly", + "hour_of_day": 0, + "week_of_month": null + }, + "version": "8.0.26", + "private_network": { + "vpc_id": 1234, + "subnet_id": 5678, + "public_access": true + } + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_config.json b/test/fixtures/databases_mysql_config.json new file mode 100644 index 000000000..9cba0afd4 --- /dev/null +++ b/test/fixtures/databases_mysql_config.json @@ -0,0 +1,230 @@ +{ + "mysql": { + "connect_timeout": { + "description": "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + "example": 10, + "maximum": 3600, + "minimum": 2, + "requires_restart": false, + "type": "integer" + }, + "default_time_zone": { + "description": "Default server time zone as an 
offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + "example": "+03:00", + "maxLength": 100, + "minLength": 2, + "pattern": "^([-+][\\d:]*|[\\w/]*)$", + "requires_restart": false, + "type": "string" + }, + "group_concat_max_len": { + "description": "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + "example": 1024, + "maximum": 18446744073709551600, + "minimum": 4, + "requires_restart": false, + "type": "integer" + }, + "information_schema_stats_expiry": { + "description": "The time, in seconds, before cached statistics expire", + "example": 86400, + "maximum": 31536000, + "minimum": 900, + "requires_restart": false, + "type": "integer" + }, + "innodb_change_buffer_max_size": { + "description": "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25", + "example": 30, + "maximum": 50, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_flush_neighbors": { + "description": "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + "example": 0, + "maximum": 2, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_ft_min_token_size": { + "description": "Minimum length of words that are stored in an InnoDB FULLTEXT index. 
Changing this parameter will lead to a restart of the MySQL service.", + "example": 3, + "maximum": 16, + "minimum": 0, + "requires_restart": true, + "type": "integer" + }, + "innodb_ft_server_stopword_table": { + "description": "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + "example": "db_name/table_name", + "maxLength": 1024, + "pattern": "^.+/.+$", + "requires_restart": false, + "type": [ + "null", + "string" + ] + }, + "innodb_lock_wait_timeout": { + "description": "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120.", + "example": 50, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "innodb_log_buffer_size": { + "description": "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + "example": 16777216, + "maximum": 4294967295, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "innodb_online_alter_log_max_size": { + "description": "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + "example": 134217728, + "maximum": 1099511627776, + "minimum": 65536, + "requires_restart": false, + "type": "integer" + }, + "innodb_read_io_threads": { + "description": "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "innodb_rollback_on_timeout": { + "description": "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. 
Changing this parameter will lead to a restart of the MySQL service.", + "example": true, + "requires_restart": true, + "type": "boolean" + }, + "innodb_thread_concurrency": { + "description": "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit)", + "example": 10, + "maximum": 1000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_write_io_threads": { + "description": "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "interactive_timeout": { + "description": "The number of seconds the server waits for activity on an interactive connection before closing it.", + "example": 3600, + "maximum": 604800, + "minimum": 30, + "requires_restart": false, + "type": "integer" + }, + "internal_tmp_mem_storage_engine": { + "description": "The storage engine for in-memory internal temporary tables.", + "enum": [ + "TempTable", + "MEMORY" + ], + "example": "TempTable", + "requires_restart": false, + "type": "string" + }, + "max_allowed_packet": { + "description": "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)", + "example": 67108864, + "maximum": 1073741824, + "minimum": 102400, + "requires_restart": false, + "type": "integer" + }, + "max_heap_table_size": { + "description": "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "net_buffer_length": { + "description": "Start sizes of connection buffer and result buffer. Default is 16384 (16K). 
Changing this parameter will lead to a restart of the MySQL service.", + "example": 16384, + "maximum": 1048576, + "minimum": 1024, + "requires_restart": true, + "type": "integer" + }, + "net_read_timeout": { + "description": "The number of seconds to wait for more data from a connection before aborting the read.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "net_write_timeout": { + "description": "The number of seconds to wait for a block to be written to a connection before aborting the write.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "sort_buffer_size": { + "description": "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)", + "example": 262144, + "maximum": 1073741824, + "minimum": 32768, + "requires_restart": false, + "type": "integer" + }, + "sql_mode": { + "description": "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Akamai default SQL mode (strict, SQL standard compliant) will be assigned.", + "example": "ANSI,TRADITIONAL", + "maxLength": 1024, + "pattern": "^[A-Z_]*(,[A-Z_]+)*$", + "requires_restart": false, + "type": "string" + }, + "sql_require_primary_key": { + "description": "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "tmp_table_size": { + "description": "Limits the size of internal in-memory tables. Also set max_heap_table_size. 
Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "wait_timeout": { + "description": "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + "example": 28800, + "maximum": 2147483, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } + }, + "binlog_retention_period": { + "description": "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + "example": 600, + "maximum": 86400, + "minimum": 600, + "requires_restart": false, + "type": "integer" + } +} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances.json b/test/fixtures/databases_mysql_instances.json new file mode 100644 index 000000000..c442b8345 --- /dev/null +++ b/test/fixtures/databases_mysql_instances.json @@ -0,0 +1,75 @@ +{ + "data": [ + { + "allow_list": [ + "203.0.113.1/32", + "192.0.1.0/24" + ], + "cluster_size": 3, + "created": "2022-01-01T00:01:01", + "encrypted": false, + "engine": "mysql", + "hosts": { + "primary": "lin-123-456-mysql-mysql-primary.servers.linodedb.net", + "standby": "lin-123-456-mysql-primary-private.servers.linodedb.net" + }, + "id": 123, + "label": "example-db", + "port": 3306, + "region": "us-east", + "replication_type": "semi_synch", + "ssl_connection": true, + "status": "active", + "type": "g6-dedicated-2", + "updated": "2022-01-01T00:01:01", + "updates": { + "day_of_week": 1, + "duration": 3, + "frequency": "weekly", + "hour_of_day": 0, + "week_of_month": null + }, + "version": "8.0.26", + "engine_config": { + "binlog_retention_period": 600, + "mysql": { + "connect_timeout": 10, + "default_time_zone": "+03:00", + "group_concat_max_len": 1024, + "information_schema_stats_expiry": 86400, + 
"innodb_change_buffer_max_size": 30, + "innodb_flush_neighbors": 0, + "innodb_ft_min_token_size": 3, + "innodb_ft_server_stopword_table": "db_name/table_name", + "innodb_lock_wait_timeout": 50, + "innodb_log_buffer_size": 16777216, + "innodb_online_alter_log_max_size": 134217728, + "innodb_read_io_threads": 10, + "innodb_rollback_on_timeout": true, + "innodb_thread_concurrency": 10, + "innodb_write_io_threads": 10, + "interactive_timeout": 3600, + "internal_tmp_mem_storage_engine": "TempTable", + "max_allowed_packet": 67108864, + "max_heap_table_size": 16777216, + "net_buffer_length": 16384, + "net_read_timeout": 30, + "net_write_timeout": 30, + "sort_buffer_size": 262144, + "sql_mode": "ANSI,TRADITIONAL", + "sql_require_primary_key": true, + "tmp_table_size": 16777216, + "wait_timeout": 28800 + } + }, + "private_network": { + "vpc_id": 1234, + "subnet_id": 5678, + "public_access": true + } + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances_123_credentials.json b/test/fixtures/databases_mysql_instances_123_credentials.json new file mode 100644 index 000000000..217c27c00 --- /dev/null +++ b/test/fixtures/databases_mysql_instances_123_credentials.json @@ -0,0 +1,4 @@ +{ + "password": "s3cur3P@ssw0rd", + "username": "linroot" +} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances_123_credentials_reset.json b/test/fixtures/databases_mysql_instances_123_credentials_reset.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/test/fixtures/databases_mysql_instances_123_credentials_reset.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances_123_patch.json b/test/fixtures/databases_mysql_instances_123_patch.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/test/fixtures/databases_mysql_instances_123_patch.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git 
a/test/fixtures/databases_mysql_instances_123_resume.json b/test/fixtures/databases_mysql_instances_123_resume.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/test/fixtures/databases_mysql_instances_123_resume.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances_123_ssl.json b/test/fixtures/databases_mysql_instances_123_ssl.json new file mode 100644 index 000000000..a331c5cd6 --- /dev/null +++ b/test/fixtures/databases_mysql_instances_123_ssl.json @@ -0,0 +1,3 @@ +{ + "ca_certificate": "LS0tLS1CRUdJ...==" +} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances_123_suspend.json b/test/fixtures/databases_mysql_instances_123_suspend.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/test/fixtures/databases_mysql_instances_123_suspend.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_config.json b/test/fixtures/databases_postgresql_config.json new file mode 100644 index 000000000..9a93d0aa9 --- /dev/null +++ b/test/fixtures/databases_postgresql_config.json @@ -0,0 +1,367 @@ +{ + "pg": { + "autovacuum_analyze_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_analyze_threshold": { + "description": "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_max_workers": { + "description": "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. 
This parameter can only be set at server start.", + "maximum": 20, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_naptime": { + "description": "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute", + "maximum": 86400, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_delay": { + "description": "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds", + "maximum": 100, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_limit": { + "description": "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + "maximum": 10000, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_vacuum_threshold": { + "description": "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_delay": { + "description": "Specifies the delay between activity rounds for the background writer in milliseconds. 
Default is 200.", + "example": 200, + "maximum": 10000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_flush_after": { + "description": "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + "example": 512, + "maximum": 2048, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_maxpages": { + "description": "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.", + "example": 100, + "maximum": 1073741823, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_multiplier": { + "description": "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a \u201cjust in time\u201d policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. 
The default is 2.0.", + "example": 2.0, + "maximum": 10, + "minimum": 0, + "requires_restart": false, + "type": "number" + }, + "deadlock_timeout": { + "description": "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + "example": 1000, + "maximum": 1800000, + "minimum": 500, + "requires_restart": false, + "type": "integer" + }, + "default_toast_compression": { + "description": "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + "enum": [ + "lz4", + "pglz" + ], + "example": "lz4", + "requires_restart": false, + "type": "string" + }, + "idle_in_transaction_session_timeout": { + "description": "Time out sessions with open transactions after this number of milliseconds", + "maximum": 604800000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "jit": { + "description": "Controls system-wide use of Just-in-Time Compilation (JIT).", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "max_files_per_process": { + "description": "PostgreSQL maximum number of files that can be open per process", + "maximum": 4096, + "minimum": 1000, + "requires_restart": false, + "type": "integer" + }, + "max_locks_per_transaction": { + "description": "PostgreSQL maximum locks per transaction", + "maximum": 6400, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_logical_replication_workers": { + "description": "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)", + "maximum": 64, + "minimum": 4, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers": { + "description": "Sets the maximum number of workers that the system can support for parallel queries", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers_per_gather": { + "description": "Sets the maximum number of 
workers that can be started by a single Gather or Gather Merge node", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_pred_locks_per_transaction": { + "description": "PostgreSQL maximum predicate locks per transaction", + "maximum": 5120, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_replication_slots": { + "description": "PostgreSQL maximum replication slots", + "maximum": 64, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "max_slot_wal_keep_size": { + "description": "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). wal_keep_size minimum WAL size setting takes precedence over this.", + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "max_stack_depth": { + "description": "Maximum depth of the stack in bytes", + "maximum": 6291456, + "minimum": 2097152, + "requires_restart": false, + "type": "integer" + }, + "max_standby_archive_delay": { + "description": "Max standby archive delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_standby_streaming_delay": { + "description": "Max standby streaming delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_wal_senders": { + "description": "PostgreSQL maximum WAL senders", + "maximum": 64, + "minimum": 20, + "requires_restart": false, + "type": "integer" + }, + "max_worker_processes": { + "description": "Sets the maximum number of background processes that the system can support", + "maximum": 96, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "password_encryption": { + "description": "Chooses the algorithm for encrypting passwords.", + "enum": [ + "md5", + "scram-sha-256" + ], + "example": "scram-sha-256", + "requires_restart": false, + "type": [ + "string", + "null" + ] 
+ }, + "pg_partman_bgw.interval": { + "description": "Sets the time interval to run pg_partman's scheduled tasks", + "example": 3600, + "maximum": 604800, + "minimum": 3600, + "requires_restart": false, + "type": "integer" + }, + "pg_partman_bgw.role": { + "description": "Controls which role to use for pg_partman's scheduled background tasks.", + "example": "myrolename", + "maxLength": 64, + "pattern": "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", + "requires_restart": false, + "type": "string" + }, + "pg_stat_monitor.pgsm_enable_query_plan": { + "description": "Enables or disables query plan monitoring", + "example": false, + "requires_restart": false, + "type": "boolean" + }, + "pg_stat_monitor.pgsm_max_buckets": { + "description": "Sets the maximum number of buckets", + "example": 10, + "maximum": 10, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "pg_stat_statements.track": { + "description": "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. 
The default value is top.", + "enum": [ + "all", + "top", + "none" + ], + "requires_restart": false, + "type": [ + "string" + ] + }, + "temp_file_limit": { + "description": "PostgreSQL temporary file limit in KiB, -1 for unlimited", + "example": 5000000, + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "timezone": { + "description": "PostgreSQL service timezone", + "example": "Europe/Helsinki", + "maxLength": 64, + "pattern": "^[\\w/]*$", + "requires_restart": false, + "type": "string" + }, + "track_activity_query_size": { + "description": "Specifies the number of bytes reserved to track the currently executing command for each active session.", + "example": 1024, + "maximum": 10240, + "minimum": 1024, + "requires_restart": false, + "type": "integer" + }, + "track_commit_timestamp": { + "description": "Record commit time of transactions.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "track_functions": { + "description": "Enables tracking of function call counts and time used.", + "enum": [ + "all", + "pl", + "none" + ], + "requires_restart": false, + "type": "string" + }, + "track_io_timing": { + "description": "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "wal_sender_timeout": { + "description": "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", + "example": 60000, + "requires_restart": false, + "type": "integer" + }, + "wal_writer_delay": { + "description": "WAL flush interval in milliseconds. 
Note that setting this value to lower than the default 200ms may negatively impact performance", + "example": 50, + "maximum": 200, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "pg_stat_monitor_enable": { + "description": "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted. When this extension is enabled, pg_stat_statements results for utility commands are unreliable", + "requires_restart": true, + "type": "boolean" + }, + "pglookout": { + "max_failover_replication_time_lag": { + "description": "Number of seconds of master unavailability before triggering database failover to standby", + "maximum": 9223372036854775000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "shared_buffers_percentage": { + "description": "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", + "example": 41.5, + "maximum": 60.0, + "minimum": 20.0, + "requires_restart": false, + "type": "number" + }, + "work_mem": { + "description": "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. 
Default is 1MB + 0.075% of total RAM (up to 32MB).", + "example": 4, + "maximum": 1024, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } +} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances.json b/test/fixtures/databases_postgresql_instances.json new file mode 100644 index 000000000..7e22cbbc1 --- /dev/null +++ b/test/fixtures/databases_postgresql_instances.json @@ -0,0 +1,97 @@ +{ + "data": [ + { + "allow_list": [ + "203.0.113.1/32", + "192.0.1.0/24" + ], + "cluster_size": 3, + "created": "2022-01-01T00:01:01", + "encrypted": false, + "engine": "postgresql", + "hosts": { + "primary": "lin-0000-000-pgsql-primary.servers.linodedb.net", + "standby": "lin-0000-000-pgsql-primary-private.servers.linodedb.net" + }, + "id": 123, + "label": "example-db", + "port": 3306, + "region": "us-east", + "replication_commit_type": "local", + "replication_type": "semi_synch", + "ssl_connection": true, + "status": "active", + "type": "g6-dedicated-2", + "updated": "2022-01-01T00:01:01", + "updates": { + "day_of_week": 1, + "duration": 3, + "frequency": "weekly", + "hour_of_day": 0, + "week_of_month": null + }, + "version": "13.2", + "engine_config": { + "pg": { + "autovacuum_analyze_scale_factor": 0.5, + "autovacuum_analyze_threshold": 100, + "autovacuum_max_workers": 10, + "autovacuum_naptime": 100, + "autovacuum_vacuum_cost_delay": 50, + "autovacuum_vacuum_cost_limit": 100, + "autovacuum_vacuum_scale_factor": 0.5, + "autovacuum_vacuum_threshold": 100, + "bgwriter_delay": 200, + "bgwriter_flush_after": 512, + "bgwriter_lru_maxpages": 100, + "bgwriter_lru_multiplier": 2.0, + "deadlock_timeout": 1000, + "default_toast_compression": "lz4", + "idle_in_transaction_session_timeout": 100, + "jit": true, + "max_files_per_process": 100, + "max_locks_per_transaction": 100, + "max_logical_replication_workers": 32, + "max_parallel_workers": 64, + "max_parallel_workers_per_gather": 64, + "max_pred_locks_per_transaction": 1000, + 
"max_replication_slots": 32, + "max_slot_wal_keep_size": 100, + "max_stack_depth": 3507152, + "max_standby_archive_delay": 1000, + "max_standby_streaming_delay": 1000, + "max_wal_senders": 32, + "max_worker_processes": 64, + "password_encryption": "scram-sha-256", + "pg_partman_bgw.interval": 3600, + "pg_partman_bgw.role": "myrolename", + "pg_stat_monitor.pgsm_enable_query_plan": false, + "pg_stat_monitor.pgsm_max_buckets": 10, + "pg_stat_statements.track": "top", + "temp_file_limit": 5000000, + "timezone": "Europe/Helsinki", + "track_activity_query_size": 1024, + "track_commit_timestamp": "off", + "track_functions": "all", + "track_io_timing": "off", + "wal_sender_timeout": 60000, + "wal_writer_delay": 50 + }, + "pg_stat_monitor_enable": true, + "pglookout": { + "max_failover_replication_time_lag": 1000 + }, + "shared_buffers_percentage": 41.5, + "work_mem": 4 + }, + "private_network": { + "vpc_id": 1234, + "subnet_id": 5678, + "public_access": true + } + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances_123_credentials.json b/test/fixtures/databases_postgresql_instances_123_credentials.json new file mode 100644 index 000000000..217c27c00 --- /dev/null +++ b/test/fixtures/databases_postgresql_instances_123_credentials.json @@ -0,0 +1,4 @@ +{ + "password": "s3cur3P@ssw0rd", + "username": "linroot" +} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances_123_credentials_reset.json b/test/fixtures/databases_postgresql_instances_123_credentials_reset.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/test/fixtures/databases_postgresql_instances_123_credentials_reset.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances_123_patch.json b/test/fixtures/databases_postgresql_instances_123_patch.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ 
b/test/fixtures/databases_postgresql_instances_123_patch.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances_123_resume.json b/test/fixtures/databases_postgresql_instances_123_resume.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/test/fixtures/databases_postgresql_instances_123_resume.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances_123_ssl.json b/test/fixtures/databases_postgresql_instances_123_ssl.json new file mode 100644 index 000000000..a331c5cd6 --- /dev/null +++ b/test/fixtures/databases_postgresql_instances_123_ssl.json @@ -0,0 +1,3 @@ +{ + "ca_certificate": "LS0tLS1CRUdJ...==" +} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances_123_suspend.json b/test/fixtures/databases_postgresql_instances_123_suspend.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/test/fixtures/databases_postgresql_instances_123_suspend.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/test/fixtures/databases_types.json b/test/fixtures/databases_types.json new file mode 100644 index 000000000..d85232764 --- /dev/null +++ b/test/fixtures/databases_types.json @@ -0,0 +1,36 @@ +{ + "data": [ + { + "class": "nanode", + "deprecated": false, + "disk": 25600, + "engines": { + "mysql": [ + { + "price": { + "hourly": 0.03, + "monthly": 20 + }, + "quantity": 1 + } + ], + "postgresql": [ + { + "price": { + "hourly": 0.03, + "monthly": 20 + }, + "quantity": 1 + } + ] + }, + "id": "g6-nanode-1", + "label": "DBaaS - Nanode 1GB", + "memory": 1024, + "vcpus": 1 + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/domains.json b/test/fixtures/domains.json new file mode 100644 index 000000000..87fe96e40 --- /dev/null +++ b/test/fixtures/domains.json @@ -0,0 +1,23 @@ +{ + "page": 1, + "pages": 1, + "results": 1, + "data": [ + { + 
"domain": "example.org", + "type": "master", + "id": 12345, + "axfr_ips": [], + "retry_sec": 0, + "ttl_sec": 300, + "status": "active", + "master_ips": [], + "description": "", + "group": "", + "expire_sec": 0, + "soa_email": "test@example.org", + "refresh_sec": 0, + "tags": ["something"] + } + ] +} diff --git a/test/fixtures/domains_12345_clone.json b/test/fixtures/domains_12345_clone.json new file mode 100644 index 000000000..5ded999b6 --- /dev/null +++ b/test/fixtures/domains_12345_clone.json @@ -0,0 +1,19 @@ +{ + "axfr_ips": [], + "description": null, + "domain": "example.org", + "expire_sec": 300, + "group": null, + "id": 12345, + "master_ips": [], + "refresh_sec": 300, + "retry_sec": 300, + "soa_email": "admin@example.org", + "status": "active", + "tags": [ + "example tag", + "another example" + ], + "ttl_sec": 300, + "type": "master" +} diff --git a/test/fixtures/domains_12345_records.json b/test/fixtures/domains_12345_records.json new file mode 100644 index 000000000..fe90f3282 --- /dev/null +++ b/test/fixtures/domains_12345_records.json @@ -0,0 +1,15 @@ +{ + "created": "2018-01-01T00:01:01", + "id": 123456, + "name": "test", + "port": 80, + "priority": 50, + "protocol": null, + "service": null, + "tag": null, + "target": "192.0.2.0", + "ttl_sec": 604800, + "type": "A", + "updated": "2018-01-01T00:01:01", + "weight": 50 +} \ No newline at end of file diff --git a/test/fixtures/domains_12345_zone-file.json b/test/fixtures/domains_12345_zone-file.json new file mode 100644 index 000000000..7cb4ad591 --- /dev/null +++ b/test/fixtures/domains_12345_zone-file.json @@ -0,0 +1,12 @@ +{ + "zone_file": [ + "; example.com [123]", + "$TTL 864000", + "@ IN SOA ns1.linode.com. user.example.com. 2021000066 14400 14400 1209600 86400", + "@ NS ns1.linode.com.", + "@ NS ns2.linode.com.", + "@ NS ns3.linode.com.", + "@ NS ns4.linode.com.", + "@ NS ns5.linode.com." 
+ ] +} diff --git a/test/fixtures/domains_import.json b/test/fixtures/domains_import.json new file mode 100644 index 000000000..f1a254afc --- /dev/null +++ b/test/fixtures/domains_import.json @@ -0,0 +1,19 @@ +{ + "axfr_ips": [], + "description": null, + "domain": "example.org", + "expire_sec": 300, + "group": null, + "id": 1234, + "master_ips": [], + "refresh_sec": 300, + "retry_sec": 300, + "soa_email": "admin@example.org", + "status": "active", + "tags": [ + "example tag", + "another example" + ], + "ttl_sec": 300, + "type": "master" +} \ No newline at end of file diff --git a/test/fixtures/images.json b/test/fixtures/images.json new file mode 100644 index 000000000..37b31445f --- /dev/null +++ b/test/fixtures/images.json @@ -0,0 +1,109 @@ +{ + "pages": 1, + "results": 4, + "page": 1, + "data": [ + { + "created": "2017-01-01T00:01:01", + "description": null, + "deprecated": false, + "status": "available", + "created_by": "linode", + "id": "linode/debian9", + "label": "Debian 9", + "size": 1100, + "is_public": true, + "type": "manual", + "vendor": "Debian", + "eol": "2026-07-01T04:00:00", + "expiry": "2026-08-01T04:00:00", + "updated": "2020-07-01T04:00:00", + "capabilities": [], + "tags": ["tests"], + "total_size": 1100, + "regions": [ + { + "region": "us-east", + "status": "available" + } + ], + "is_shared": false, + "image_sharing": null + }, + { + "created": "2017-01-01T00:01:01", + "description": null, + "deprecated": false, + "status": "available", + "created_by": "linode", + "id": "linode/ubuntu17.04", + "label": "Ubuntu 17.04", + "size": 1500, + "is_public": true, + "type": "manual", + "vendor": "Ubuntu", + "eol": "2026-07-01T04:00:00", + "expiry": "2026-08-01T04:00:00", + "updated": "2020-07-01T04:00:00", + "capabilities": [], + "tags": ["tests"], + "total_size": 3000, + "regions": [ + { + "region": "us-east", + "status": "available" + }, + { + "region": "us-mia", + "status": "pending" + } + ], + "is_shared": false, + "image_sharing": null + }, + { + 
"created": "2017-01-01T00:01:01", + "description": null, + "deprecated": false, + "status": "available", + "created_by": "linode", + "id": "linode/fedora26", + "label": "Fedora 26", + "size": 1500, + "is_public": true, + "type": "manual", + "vendor": "Fedora", + "eol": "2026-07-01T04:00:00", + "expiry": "2026-08-01T04:00:00", + "updated": "2020-07-01T04:00:00", + "capabilities": [], + "is_shared": false, + "image_sharing": null + }, + { + "created": "2017-08-20T14:01:01", + "description": null, + "deprecated": false, + "status": "available", + "created_by": "testguy", + "id": "private/123", + "label": "Gold Master", + "size": 650, + "is_public": false, + "type": "manual", + "vendor": null, + "eol": "2026-07-01T04:00:00", + "expiry": "2026-08-01T04:00:00", + "updated": "2020-07-01T04:00:00", + "capabilities": ["cloud-init"], + "is_shared": false, + "image_sharing": { + "shared_by": null, + "shared_with": { + "sharegroup_count": 0, + "sharegroup_list_url": "/images/private/123/sharegroups" + } + } + } + ] +} \ No newline at end of file diff --git a/test/fixtures/images_private_1234_sharegroups.json b/test/fixtures/images_private_1234_sharegroups.json new file mode 100644 index 000000000..925b12627 --- /dev/null +++ b/test/fixtures/images_private_1234_sharegroups.json @@ -0,0 +1,19 @@ +{ + "data": [ + { + "created": "2025-04-14T22:44:02", + "description": "My group of images to share with my team.", + "expiry": null, + "id": 1, + "images_count": 1, + "is_suspended": false, + "label": "My Shared Images", + "members_count": 0, + "updated": null, + "uuid": "1533863e-16a4-47b5-b829-ac0f35c13278" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/images_private_123_regions.json b/test/fixtures/images_private_123_regions.json new file mode 100644 index 000000000..5540fc116 --- /dev/null +++ b/test/fixtures/images_private_123_regions.json @@ -0,0 +1,29 @@ +{ + "created": "2017-08-20T14:01:01", + "description": null, + "deprecated": false, + 
"status": "available", + "created_by": "testguy", + "id": "private/123", + "label": "Gold Master", + "size": 650, + "is_public": false, + "type": "manual", + "vendor": null, + "eol": "2026-07-01T04:00:00", + "expiry": "2026-08-01T04:00:00", + "updated": "2020-07-01T04:00:00", + "capabilities": ["cloud-init"], + "tags": ["tests"], + "total_size": 1300, + "regions": [ + { + "region": "us-east", + "status": "available" + }, + { + "region": "us-west", + "status": "pending replication" + } + ] +} \ No newline at end of file diff --git a/test/fixtures/images_private_1337.json b/test/fixtures/images_private_1337.json new file mode 100644 index 000000000..b4deae196 --- /dev/null +++ b/test/fixtures/images_private_1337.json @@ -0,0 +1,17 @@ +{ + "created": "2021-08-14T22:44:02", + "created_by": "someone", + "deprecated": false, + "description": "very real image upload.", + "eol": "2026-07-01T04:00:00", + "expiry": null, + "id": "private/1337", + "is_public": false, + "label": "Realest Image Upload", + "size": 2500, + "status": "available", + "type": "manual", + "updated": "2021-08-14T22:44:02", + "vendor": "Debian", + "capabilities": ["cloud-init"] +} \ No newline at end of file diff --git a/test/fixtures/images_sharegroups.json b/test/fixtures/images_sharegroups.json new file mode 100644 index 000000000..53b54c07a --- /dev/null +++ b/test/fixtures/images_sharegroups.json @@ -0,0 +1,31 @@ +{ + "data": [ + { + "created": "2025-04-14T22:44:02", + "description": "My group of images to share with my team.", + "expiry": null, + "id": 1, + "images_count": 0, + "is_suspended": false, + "label": "My Shared Images", + "members_count": 0, + "updated": null, + "uuid": "1533863e-16a4-47b5-b829-ac0f35c13278" + }, + { + "created": "2025-04-14T22:44:03", + "description": "My other group of images to share with my team.", + "expiry": null, + "id": 2, + "images_count": 1, + "is_suspended": false, + "label": "My other Shared Images", + "members_count": 3, + "updated": null, + "uuid": 
"30ee6599-eb0f-478c-9e55-4073c6c24a39" + } + ], + "page": 1, + "pages": 1, + "results": 2 +} diff --git a/test/fixtures/images_sharegroups_1234.json b/test/fixtures/images_sharegroups_1234.json new file mode 100644 index 000000000..9817ea3d9 --- /dev/null +++ b/test/fixtures/images_sharegroups_1234.json @@ -0,0 +1,12 @@ +{ + "created": "2025-04-14T22:44:02", + "description": "My group of images to share with my team.", + "expiry": null, + "id": 1234, + "images_count": 0, + "is_suspended": false, + "label": "My Shared Images", + "members_count": 0, + "updated": null, + "uuid": "1533863e-16a4-47b5-b829-ac0f35c13278" +} \ No newline at end of file diff --git a/test/fixtures/images_sharegroups_1234_images.json b/test/fixtures/images_sharegroups_1234_images.json new file mode 100644 index 000000000..f63e52392 --- /dev/null +++ b/test/fixtures/images_sharegroups_1234_images.json @@ -0,0 +1,45 @@ +{ + "data": [ + { + "capabilities": [ + "cloud-init", + "distributed-sites" + ], + "created": "2021-08-14T22:44:02", + "created_by": null, + "deprecated": false, + "description": "Example image description.", + "eol": "2026-07-01T04:00:00", + "expiry": null, + "id": "shared/1", + "is_public": true, + "is_shared": null, + "label": "Debian 11", + "regions": [ + { + "region": "us-iad", + "status": "available" + } + ], + "size": 2500, + "status": "available", + "tags": [ + "repair-image", + "fix-1" + ], + "total_size": 1234567, + "type": "manual", + "updated": "2021-08-14T22:44:02", + "vendor": null, + "image_sharing": { + "shared_with": null, + "shared_by": { + "sharegroup_id": 1234, + "sharegroup_uuid": "0ee8e1c1-b19b-4052-9487-e3b13faac111", + "sharegroup_label": "test-group-minecraft-1", + "source_image_id": null + } + } + } + ] +} \ No newline at end of file diff --git a/test/fixtures/images_sharegroups_1234_images_shared_1.json b/test/fixtures/images_sharegroups_1234_images_shared_1.json new file mode 100644 index 000000000..1b1179c93 --- /dev/null +++ 
b/test/fixtures/images_sharegroups_1234_images_shared_1.json @@ -0,0 +1,41 @@ +{ + "capabilities": [ + "cloud-init", + "distributed-sites" + ], + "created": "2021-08-14T22:44:02", + "created_by": null, + "deprecated": false, + "description": "Example image description.", + "eol": "2026-07-01T04:00:00", + "expiry": null, + "id": "shared/1", + "is_public": true, + "is_shared": null, + "label": "Debian 11", + "regions": [ + { + "region": "us-iad", + "status": "available" + } + ], + "size": 2500, + "status": "available", + "tags": [ + "repair-image", + "fix-1" + ], + "total_size": 1234567, + "type": "manual", + "updated": "2021-08-14T22:44:02", + "vendor": null, + "image_sharing": { + "shared_with": null, + "shared_by": { + "sharegroup_id": 1234, + "sharegroup_uuid": "0ee8e1c1-b19b-4052-9487-e3b13faac111", + "sharegroup_label": "test-group-minecraft-1", + "source_image_id": null + } + } +} diff --git a/test/fixtures/images_sharegroups_1234_members.json b/test/fixtures/images_sharegroups_1234_members.json new file mode 100644 index 000000000..424f8b23c --- /dev/null +++ b/test/fixtures/images_sharegroups_1234_members.json @@ -0,0 +1,15 @@ +{ + "data": [ + { + "created": "2025-08-04T10:07:59", + "expiry": null, + "label": "New Member", + "status": "active", + "token_uuid": "4591075e-4ba8-43c9-a521-928c3d4a135d", + "updated": null + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/images_sharegroups_1234_members_abc123.json b/test/fixtures/images_sharegroups_1234_members_abc123.json new file mode 100644 index 000000000..156458ccc --- /dev/null +++ b/test/fixtures/images_sharegroups_1234_members_abc123.json @@ -0,0 +1,8 @@ +{ + "created": "2025-08-04T10:07:59", + "expiry": null, + "label": "New Member", + "status": "active", + "token_uuid": "abc123", + "updated": null +} \ No newline at end of file diff --git a/test/fixtures/images_sharegroups_tokens.json b/test/fixtures/images_sharegroups_tokens.json new file mode 
100644 index 000000000..916ae8ae6 --- /dev/null +++ b/test/fixtures/images_sharegroups_tokens.json @@ -0,0 +1,18 @@ +{ + "data": [ + { + "created": "2025-08-04T10:09:09", + "expiry": null, + "label": "My Sharegroup Token", + "sharegroup_label": "A Sharegroup", + "sharegroup_uuid": "e1d0e58b-f89f-4237-84ab-b82077342359", + "status": "active", + "token_uuid": "13428362-5458-4dad-b14b-8d0d4d648f8c", + "updated": null, + "valid_for_sharegroup_uuid": "e1d0e58b-f89f-4237-84ab-b82077342359" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/images_sharegroups_tokens_abc123.json b/test/fixtures/images_sharegroups_tokens_abc123.json new file mode 100644 index 000000000..d7d4d045d --- /dev/null +++ b/test/fixtures/images_sharegroups_tokens_abc123.json @@ -0,0 +1,12 @@ +{ + "created": "2025-08-04T10:09:09", + "expiry": null, + "label": "My Sharegroup Token", + "sharegroup_label": "A Sharegroup", + "sharegroup_uuid": "e1d0e58b-f89f-4237-84ab-b82077342359", + "status": "active", + "token_uuid": "abc123", + "updated": null, + "valid_for_sharegroup_uuid": "e1d0e58b-f89f-4237-84ab-b82077342359", + "token": "asupersecrettoken" +} \ No newline at end of file diff --git a/test/fixtures/images_sharegroups_tokens_abc123_sharegroup.json b/test/fixtures/images_sharegroups_tokens_abc123_sharegroup.json new file mode 100644 index 000000000..2dfd5e928 --- /dev/null +++ b/test/fixtures/images_sharegroups_tokens_abc123_sharegroup.json @@ -0,0 +1,9 @@ +{ + "created": "2025-04-14T22:44:02", + "description": "Group of base operating system images and engineers used for CI/CD pipelines and infrastructure automation", + "id": 1234, + "is_suspended": false, + "label": "DevOps Base Images", + "updated": null, + "uuid": "1533863e-16a4-47b5-b829-ac0f35c13278" +} \ No newline at end of file diff --git a/test/fixtures/images_sharegroups_tokens_abc123_sharegroup_images.json b/test/fixtures/images_sharegroups_tokens_abc123_sharegroup_images.json new file mode 100644 index 
000000000..f63e52392 --- /dev/null +++ b/test/fixtures/images_sharegroups_tokens_abc123_sharegroup_images.json @@ -0,0 +1,45 @@ +{ + "data": [ + { + "capabilities": [ + "cloud-init", + "distributed-sites" + ], + "created": "2021-08-14T22:44:02", + "created_by": null, + "deprecated": false, + "description": "Example image description.", + "eol": "2026-07-01T04:00:00", + "expiry": null, + "id": "shared/1", + "is_public": true, + "is_shared": null, + "label": "Debian 11", + "regions": [ + { + "region": "us-iad", + "status": "available" + } + ], + "size": 2500, + "status": "available", + "tags": [ + "repair-image", + "fix-1" + ], + "total_size": 1234567, + "type": "manual", + "updated": "2021-08-14T22:44:02", + "vendor": null, + "image_sharing": { + "shared_with": null, + "shared_by": { + "sharegroup_id": 1234, + "sharegroup_uuid": "0ee8e1c1-b19b-4052-9487-e3b13faac111", + "sharegroup_label": "test-group-minecraft-1", + "source_image_id": null + } + } + } + ] +} \ No newline at end of file diff --git a/test/fixtures/images_upload.json b/test/fixtures/images_upload.json new file mode 100644 index 000000000..893270130 --- /dev/null +++ b/test/fixtures/images_upload.json @@ -0,0 +1,21 @@ +{ + "image": { + "created": "2021-08-14T22:44:02", + "created_by": "someone", + "deprecated": false, + "description": "very real image upload.", + "eol": "2026-07-01T04:00:00", + "expiry": null, + "id": "private/1337", + "is_public": false, + "label": "Realest Image Upload", + "size": 2500, + "status": "available", + "type": "manual", + "updated": "2021-08-14T22:44:02", + "vendor": "Debian", + "capabilities": ["cloud-init"], + "tags": ["test_tag", "test2"] + }, + "upload_to": "https://linode.com/" +} \ No newline at end of file diff --git a/test/fixtures/linode_instances.json b/test/fixtures/linode_instances.json new file mode 100644 index 000000000..08cbe80c8 --- /dev/null +++ b/test/fixtures/linode_instances.json @@ -0,0 +1,145 @@ +{ + "page": 1, + "pages": 1, + "results": 2, + "data": 
[ + { + "group": "test", + "hypervisor": "kvm", + "id": 123, + "status": "running", + "type": "g6-standard-1", + "alerts": { + "network_in": 5, + "network_out": 5, + "cpu": 90, + "transfer_quota": 80, + "io": 5000 + }, + "label": "linode123", + "backups": { + "enabled": true, + "schedule": { + "window": "W02", + "day": "Scheduling" + } + }, + "specs": { + "memory": 2048, + "disk": 30720, + "vcpus": 1, + "transfer": 2000 + }, + "ipv6": "1234:abcd::1234:abcd:89ef:67cd/64", + "created": "2017-01-01T00:00:00", + "region": "us-east-1a", + "ipv4": [ + "123.45.67.89" + ], + "updated": "2017-01-01T00:00:00", + "image": "linode/ubuntu17.04", + "tags": [ + "something" + ], + "host_uuid": "3a3ddd59d9a78bb8de041391075df44de62bfec8", + "watchdog_enabled": true, + "disk_encryption": "disabled", + "lke_cluster_id": null, + "placement_group": { + "id": 123, + "label": "test", + "placement_group_type": "anti_affinity:local", + "placement_group_policy": "strict" + }, + "maintenance_policy" : "linode/migrate" + }, + { + "group": "test", + "hypervisor": "kvm", + "id": 456, + "status": "running", + "type": "g5-standard-1", + "alerts": { + "network_in": 5, + "network_out": 5, + "cpu": 90, + "transfer_quota": 80, + "io": 5000 + }, + "label": "linode456", + "backups": { + "enabled": false, + "schedule": { + "window": null, + "day": null + } + }, + "specs": { + "memory": 2048, + "disk": 30720, + "vcpus": 1, + "transfer": 2000 + }, + "ipv6": "1234:abcd::1234:abcd:89ef:67cd/64", + "created": "2017-01-01T00:00:00", + "region": "us-east-1a", + "ipv4": [ + "123.45.67.89" + ], + "updated": "2017-01-01T00:00:00", + "image": "linode/debian9", + "tags": [], + "host_uuid": "3a3ddd59d9a78bb8de041391075df44de62bfec8", + "watchdog_enabled": false, + "disk_encryption": "enabled", + "lke_cluster_id": 18881, + "placement_group": null + }, + { + "id": 124, + "status": "running", + "type": "g6-standard-1", + "alerts": { + "network_in": 5, + "network_out": 5, + "cpu": 90, + "transfer_quota": 80, + "io": 5000 
+ }, + "group": "test", + "hypervisor": "kvm", + "label": "linode124", + "backups": { + "enabled": true, + "schedule": { + "window": "W02", + "day": "Scheduling" + } + }, + "specs": { + "memory": 2048, + "disk": 30720, + "vcpus": 1, + "transfer": 2000 + }, + "ipv6": "1235:abcd::1234:abcd:89ef:67cd/64", + "created": "2017-01-01T00:00:00", + "region": "us-east-1", + "ipv4": [ + "124.45.67.89" + ], + "updated": "2017-01-01T00:00:00", + "image": "linode/ubuntu24.04", + "tags": [ + "something" + ], + "host_uuid": "3b3ddd59d9a78bb8de041391075df44de62bfec8", + "watchdog_enabled": true, + "disk_encryption": "disabled", + "lke_cluster_id": null, + "placement_group": null, + "interface_generation": "linode", + "maintenance_policy" : "linode/power_off_on" + } + ] +} diff --git a/test/fixtures/linode_instances_123_backups.json b/test/fixtures/linode_instances_123_backups.json new file mode 100644 index 000000000..94fe7f3b7 --- /dev/null +++ b/test/fixtures/linode_instances_123_backups.json @@ -0,0 +1,86 @@ +{ + "automatic": [ + { + "region": "us-east-1a", + "finished": "2018-01-09T00:01:01", + "updated": "2018-01-09T00:01:01", + "disks": [ + { + "size": 1024, + "label": "Debian 8.1 Disk", + "filesystem": "ext4" + }, + { + "size": 0, + "label": "256MB Swap Image", + "filesystem": "swap" + } + ], + "label": null, + "configs": [ + "My Debian 8.1 Profile" + ], + "id": 12345, + "status": "successful", + "created": "2018-01-09T00:01:01", + "type": "auto", + "available": true + }, + { + "region": "us-east-1a", + "finished": "2018-01-01T00:01:01", + "updated": "2018-01-01T00:01:01", + "disks": [ + { + "size": 1024, + "label": "Debian 8.1 Disk", + "filesystem": "ext4" + }, + { + "size": 0, + "label": "256MB Swap Image", + "filesystem": "swap" + } + ], + "label": null, + "configs": [ + "My Debian 8.1 Profile" + ], + "id": 12456, + "status": "successful", + "created": "2018-01-01T00:01:01", + "type": "auto", + "available": true + }, + { + "region": "us-east-1a", + "finished": 
"2018-01-07T00:01:01", + "updated": "2018-01-07T00:01:01", + "disks": [ + { + "size": 1024, + "label": "Debian 8.1 Disk", + "filesystem": "ext4" + }, + { + "size": 0, + "label": "256MB Swap Image", + "filesystem": "swap" + } + ], + "label": null, + "configs": [ + "My Debian 8.1 Profile" + ], + "id": 12567, + "status": "successful", + "created": "2018-01-07T00:01:01", + "type": "auto", + "available": false + } + ], + "snapshot": { + "in_progress": null, + "current": null + } +} diff --git a/test/fixtures/linode_instances_123_configs.json b/test/fixtures/linode_instances_123_configs.json new file mode 100644 index 000000000..082f8eefd --- /dev/null +++ b/test/fixtures/linode_instances_123_configs.json @@ -0,0 +1,86 @@ +{ + "data": [ + { + "root_device": "/dev/sda", + "comments": "", + "helpers": { + "updatedb_disabled": true, + "modules_dep": true, + "devtmpfs_automount": true, + "distro": true, + "network": false + }, + "label": "My Ubuntu 17.04 LTS Profile", + "created": "2014-10-07T20:04:00", + "memory_limit": 0, + "id": 456789, + "interfaces": [ + { + "id": 456, + "purpose": "public", + "primary": true + }, + { + "id": 123, + "purpose": "vpc", + "primary": true, + "active": true, + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "vpc": "10.0.0.2", + "nat_1_1": "any" + }, + "ipv6": { + "slaac": [ + { + "range": "1234::5678/64", + "address": "1234::5678" + } + ], + "ranges": [ + { + "range": "1234::5678/64" + } + ], + "is_public": true + }, + "ip_ranges": [ + "10.0.0.0/24" + ] + }, + { + "id": 321, + "primary": false, + "ipam_address": "10.0.0.2", + "label": "test-interface", + "purpose": "vlan" + } + ], + "run_level": "default", + "initrd": null, + "virt_mode": "paravirt", + "kernel": "linode/latest-64bit", + "updated": "2014-10-07T20:04:00", + "devices": { + "sda": { + "disk_id": 12345, + "volume_id": null + }, + "sdc": null, + "sde": null, + "sdh": null, + "sdg": null, + "sdb": { + "disk_id": 12346, + "volume_id": null + }, + "sdf": null, + "sdd": null + } + 
} + ], + "pages": 1, + "page": 1, + "results": 1 +} diff --git a/test/fixtures/linode_instances_123_configs_456789.json b/test/fixtures/linode_instances_123_configs_456789.json new file mode 100644 index 000000000..8f4387af9 --- /dev/null +++ b/test/fixtures/linode_instances_123_configs_456789.json @@ -0,0 +1,79 @@ +{ + "root_device": "/dev/sda", + "comments": "", + "helpers": { + "updatedb_disabled": true, + "modules_dep": true, + "devtmpfs_automount": true, + "distro": true, + "network": false + }, + "label": "My Ubuntu 17.04 LTS Profile", + "created": "2014-10-07T20:04:00", + "memory_limit": 0, + "id": 456789, + "interfaces": [ + { + "id": 456, + "purpose": "public", + "primary": true + }, + { + "id": 123, + "purpose": "vpc", + "primary": true, + "active": true, + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "vpc": "10.0.0.2", + "nat_1_1": "any" + }, + "ipv6": { + "slaac": [ + { + "range": "1234::5678/64", + "address": "1234::5678" + } + ], + "ranges": [ + { + "range": "1234::5678/64" + } + ], + "is_public": true + }, + "ip_ranges": [ + "10.0.0.0/24" + ] + }, + { + "id": 321, + "primary": false, + "ipam_address": "10.0.0.2", + "label": "test-interface", + "purpose": "vlan" + } + ], + "run_level": "default", + "initrd": null, + "virt_mode": "paravirt", + "kernel": "linode/latest-64bit", + "updated": "2014-10-07T20:04:00", + "devices": { + "sda": { + "disk_id": 12345, + "volume_id": null + }, + "sdc": null, + "sde": null, + "sdh": null, + "sdg": null, + "sdb": { + "disk_id": 12346, + "volume_id": null + }, + "sdf": null, + "sdd": null + } +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_configs_456789_interfaces.json b/test/fixtures/linode_instances_123_configs_456789_interfaces.json new file mode 100644 index 000000000..120551365 --- /dev/null +++ b/test/fixtures/linode_instances_123_configs_456789_interfaces.json @@ -0,0 +1,48 @@ +{ + "data": [ + { + "id": 456, + "purpose": "public", + "primary": true + }, + { + "id": 123, + 
"purpose": "vpc", + "primary": true, + "active": true, + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "vpc": "10.0.0.2", + "nat_1_1": "any" + }, + "ipv6": { + "slaac": [ + { + "range": "1234::5678/64", + "address": "1234::5678" + } + ], + "ranges": [ + { + "range": "1234::5678/64" + } + ], + "is_public": true + }, + "ip_ranges": [ + "10.0.0.0/24" + ] + }, + { + "id": 321, + "primary": false, + "ipam_address": "10.0.0.2", + "label": "test-interface", + "purpose": "vlan" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_configs_456789_interfaces_123.json b/test/fixtures/linode_instances_123_configs_456789_interfaces_123.json new file mode 100644 index 000000000..c120905b2 --- /dev/null +++ b/test/fixtures/linode_instances_123_configs_456789_interfaces_123.json @@ -0,0 +1,29 @@ +{ + "id": 123, + "purpose": "vpc", + "primary": true, + "active": true, + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "vpc": "10.0.0.2", + "nat_1_1": "any" + }, + "ipv6": { + "slaac": [ + { + "range": "1234::5678/64", + "address": "1234::5678" + } + ], + "ranges": [ + { + "range": "1234::5678/64" + } + ], + "is_public": true + }, + "ip_ranges": [ + "10.0.0.0/24" + ] +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_configs_456789_interfaces_123_put.json b/test/fixtures/linode_instances_123_configs_456789_interfaces_123_put.json new file mode 100644 index 000000000..684e26cf0 --- /dev/null +++ b/test/fixtures/linode_instances_123_configs_456789_interfaces_123_put.json @@ -0,0 +1,14 @@ +{ + "id": 123, + "purpose": "vpc", + "primary": false, + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "vpc": "10.0.0.3", + "nat_1_1": "any" + }, + "ip_ranges": [ + "10.0.0.0/24" + ] +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_configs_456789_interfaces_321.json b/test/fixtures/linode_instances_123_configs_456789_interfaces_321.json new file mode 
100644 index 000000000..d41133eb2 --- /dev/null +++ b/test/fixtures/linode_instances_123_configs_456789_interfaces_321.json @@ -0,0 +1,7 @@ +{ + "id": 321, + "primary": false, + "ipam_address":"10.0.0.2", + "label":"test-interface", + "purpose":"vlan" +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_configs_456789_interfaces_456.json b/test/fixtures/linode_instances_123_configs_456789_interfaces_456.json new file mode 100644 index 000000000..94c7bc339 --- /dev/null +++ b/test/fixtures/linode_instances_123_configs_456789_interfaces_456.json @@ -0,0 +1,5 @@ +{ + "id": 456, + "purpose": "public", + "primary": true +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_disks.json b/test/fixtures/linode_instances_123_disks.json new file mode 100644 index 000000000..ddfe7f313 --- /dev/null +++ b/test/fixtures/linode_instances_123_disks.json @@ -0,0 +1,27 @@ +{ + "page": 1, + "results": 2, + "pages": 1, + "data": [ + { + "size": 25088, + "status": "ready", + "filesystem": "ext4", + "id": 12345, + "updated": "2017-01-01T00:00:00", + "label": "Ubuntu 17.04 Disk", + "created": "2017-01-01T00:00:00", + "disk_encryption": "disabled" + }, + { + "size": 512, + "status": "ready", + "filesystem": "swap", + "id": 12346, + "updated": "2017-01-01T00:00:00", + "label": "512 MB Swap Image", + "created": "2017-01-01T00:00:00", + "disk_encryption": "disabled" + } + ] +} diff --git a/test/fixtures/linode_instances_123_disks_12345_clone.json b/test/fixtures/linode_instances_123_disks_12345_clone.json new file mode 100644 index 000000000..899833e56 --- /dev/null +++ b/test/fixtures/linode_instances_123_disks_12345_clone.json @@ -0,0 +1,11 @@ +{ + "size": 25088, + "status": "ready", + "filesystem": "ext4", + "id": 12345, + "updated": "2017-01-01T00:00:00", + "label": "Ubuntu 17.04 Disk", + "created": "2017-01-01T00:00:00", + "disk_encryption": "disabled" + } + \ No newline at end of file diff --git 
a/test/fixtures/linode_instances_123_firewalls.json b/test/fixtures/linode_instances_123_firewalls.json new file mode 100644 index 000000000..17a4a9199 --- /dev/null +++ b/test/fixtures/linode_instances_123_firewalls.json @@ -0,0 +1,56 @@ +{ + "data": [ + { + "created": "2018-01-01T00:01:01", + "id": 123, + "label": "firewall123", + "rules": { + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24" + ], + "ipv6": [ + "2001:DB8::/32" + ] + }, + "description": "An example firewall rule description.", + "label": "firewallrule123", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "inbound_policy": "DROP", + "outbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24" + ], + "ipv6": [ + "2001:DB8::/32" + ] + }, + "description": "An example firewall rule description.", + "label": "firewallrule123", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "outbound_policy": "DROP" + }, + "status": "enabled", + "tags": [ + "example tag", + "another example" + ], + "updated": "2018-01-02T00:01:01" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/linode_instances_123_ips.json b/test/fixtures/linode_instances_123_ips.json new file mode 100644 index 000000000..22d61f7b0 --- /dev/null +++ b/test/fixtures/linode_instances_123_ips.json @@ -0,0 +1,106 @@ +{ + "ipv4": { + "private": [ + { + "address": "192.168.133.234", + "gateway": null, + "linode_id": 123, + "prefix": 17, + "public": false, + "rdns": null, + "region": "us-east", + "subnet_mask": "255.255.128.0", + "type": "ipv4" + } + ], + "public": [ + { + "address": "97.107.143.141", + "gateway": "97.107.143.1", + "linode_id": 123, + "prefix": 24, + "public": true, + "rdns": "test.example.org", + "region": "us-east", + "subnet_mask": "255.255.255.0", + "type": "ipv4" + } + ], + "reserved": [ + { + "address": "97.107.143.141", + "gateway": "97.107.143.1", + "linode_id": 123, + "prefix": 24, + "public": true, + "rdns": 
"test.example.org", + "region": "us-east", + "subnet_mask": "255.255.255.0", + "type": "ipv4" + } + ], + "vpc": [ + { + "address": "10.0.0.2", + "address_range": null, + "vpc_id": 39246, + "subnet_id": 39388, + "region": "us-mia", + "linode_id": 55904908, + "config_id": 59036295, + "interface_id": 1186165, + "active": true, + "nat_1_1": "172.233.179.133", + "gateway": "10.0.0.1", + "prefix": 24, + "subnet_mask": "255.255.255.0" + } + ], + "shared": [ + { + "address": "97.107.143.141", + "gateway": "97.107.143.1", + "linode_id": 123, + "prefix": 24, + "public": true, + "rdns": "test.example.org", + "region": "us-east", + "subnet_mask": "255.255.255.0", + "type": "ipv4" + } + ] + }, + "ipv6": { + "global": [ + { + "prefix": 124, + "range": "2600:3c01::2:5000:0", + "region": "us-east", + "route_target": "2600:3c01::2:5000:f" + } + ], + "link_local": { + "address": "fe80::f03c:91ff:fe24:3a2f", + "gateway": "fe80::1", + "linode_id": 123, + "prefix": 64, + "public": false, + "rdns": null, + "region": "us-east", + "subnet_mask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "type": "ipv6" + }, + "slaac": { + "address": "2600:3c03::f03c:91ff:fe24:3a2f", + "gateway": "fe80::1", + "linode_id": 123, + "prefix": 64, + "public": true, + "rdns": null, + "region": "us-east", + "subnet_mask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "type": "ipv6" + } + } +} + \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_nodebalancers.json b/test/fixtures/linode_instances_123_nodebalancers.json new file mode 100644 index 000000000..821ff4801 --- /dev/null +++ b/test/fixtures/linode_instances_123_nodebalancers.json @@ -0,0 +1,27 @@ +{ + "data": [ + { + "client_conn_throttle": 0, + "created": "2018-01-01T00:01:01", + "hostname": "192.0.2.1.ip.linodeusercontent.com", + "id": 12345, + "ipv4": "203.0.113.1", + "ipv6": null, + "label": "balancer12345", + "region": "us-east", + "tags": [ + "example tag", + "another example" + ], + "transfer": { + "in": 28.91200828552246, 
+ "out": 3.5487728118896484, + "total": 32.46078109741211 + }, + "updated": "2018-03-01T00:01:01" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/linode_instances_123_transfer.json b/test/fixtures/linode_instances_123_transfer.json new file mode 100644 index 000000000..289cd2a6b --- /dev/null +++ b/test/fixtures/linode_instances_123_transfer.json @@ -0,0 +1,5 @@ +{ + "quota": 471, + "used": 10369075, + "billable": 0 +} diff --git a/test/fixtures/linode_instances_123_transfer_2023_4.json b/test/fixtures/linode_instances_123_transfer_2023_4.json new file mode 100644 index 000000000..3b9397efa --- /dev/null +++ b/test/fixtures/linode_instances_123_transfer_2023_4.json @@ -0,0 +1,6 @@ +{ + "bytes_in": 30471077120, + "bytes_out": 22956600198, + "bytes_total": 53427677318 + } + \ No newline at end of file diff --git a/test/fixtures/linode_instances_123_volumes.json b/test/fixtures/linode_instances_123_volumes.json new file mode 100644 index 000000000..63038e042 --- /dev/null +++ b/test/fixtures/linode_instances_123_volumes.json @@ -0,0 +1,24 @@ +{ + "data": [ + { + "created": "2018-01-01T00:01:01", + "filesystem_path": "/dev/disk/by-id/scsi-0Linode_Volume_my-volume", + "hardware_type": "nvme", + "id": 12345, + "label": "my-volume", + "linode_id": 12346, + "linode_label": "linode123", + "region": "us-east", + "size": 30, + "status": "active", + "tags": [ + "example tag", + "another example" + ], + "updated": "2018-01-01T00:01:01" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/linode_instances_124.json b/test/fixtures/linode_instances_124.json new file mode 100644 index 000000000..6c059ba41 --- /dev/null +++ b/test/fixtures/linode_instances_124.json @@ -0,0 +1,43 @@ +{ + "id": 124, + "status": "running", + "type": "g6-standard-1", + "alerts": { + "network_in": 5, + "network_out": 5, + "cpu": 90, + "transfer_quota": 80, + "io": 5000 + }, + "group": "test", + "hypervisor": "kvm", + "label": "linode124", + 
"backups": { + "enabled": true, + "schedule": { + "window": "W02", + "day": "Scheduling" + } + }, + "specs": { + "memory": 2048, + "disk": 30720, + "vcpus": 1, + "transfer": 2000 + }, + "ipv6": "1235:abcd::1234:abcd:89ef:67cd/64", + "created": "2017-01-01T00:00:00", + "region": "us-east-1", + "ipv4": [ + "124.45.67.89" + ], + "updated": "2017-01-01T00:00:00", + "image": "linode/ubuntu24.04", + "tags": ["something"], + "host_uuid": "3b3ddd59d9a78bb8de041391075df44de62bfec8", + "watchdog_enabled": true, + "disk_encryption": "disabled", + "lke_cluster_id": null, + "placement_group": null, + "interface_generation": "linode" +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_124_interfaces.json b/test/fixtures/linode_instances_124_interfaces.json new file mode 100644 index 000000000..dbb6f79fb --- /dev/null +++ b/test/fixtures/linode_instances_124_interfaces.json @@ -0,0 +1,117 @@ +{ + "interfaces": [ + { + "created": "2025-01-01T00:01:01", + "default_route": { + "ipv4": true, + "ipv6": true + }, + "id": 123, + "mac_address": "22:00:AB:CD:EF:01", + "public": { + "ipv4": { + "addresses": [ + { + "address": "172.30.0.50", + "primary": true + } + ], + "shared": [ + { + "address": "172.30.0.51", + "linode_id": 125 + } + ] + }, + "ipv6": { + "ranges": [ + { + "range": "2600:3c09:e001:59::/64", + "route_target": "2600:3c09::ff:feab:cdef" + }, + { + "range": "2600:3c09:e001:5a::/64", + "route_target": "2600:3c09::ff:feab:cdef" + } + ], + "shared": [ + { + "range": "2600:3c09:e001:2a::/64", + "route_target": null + } + ], + "slaac": [ + { + "address": "2600:3c09::ff:feab:cdef", + "prefix": 64 + } + ] + } + }, + "updated": "2025-01-01T00:01:01", + "version": 1, + "vlan": null, + "vpc": null + }, + { + "id": 456, + "mac_address": "22:00:AB:CD:EF:01", + "created": "2024-01-01T00:01:01", + "updated": "2024-01-01T00:01:01", + "default_route": { + "ipv4": true + }, + "version": 1, + "vpc": { + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "addresses": [ 
+ { + "address": "192.168.22.3", + "primary": true + } + ], + "ranges": [ + { + "range": "192.168.22.16/28" + }, + { + "range": "192.168.22.32/28" + } + ] + }, + "ipv6": { + "is_public": true, + "slaac": [ + { + "range": "1234::/64", + "address": "1234::5678" + } + ], + "ranges": [ + { + "range": "4321::/64" + } + ] + } + }, + "public": null, + "vlan": null + }, + { + "id": 789, + "mac_address": "22:00:AB:CD:EF:01", + "created": "2024-01-01T00:01:01", + "updated": "2024-01-01T00:01:01", + "default_route": {}, + "version": 1, + "vpc": null, + "public": null, + "vlan": { + "vlan_label": "my_vlan", + "ipam_address": "10.0.0.1/24" + } + } + ] +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_124_interfaces_123.json b/test/fixtures/linode_instances_124_interfaces_123.json new file mode 100644 index 000000000..2dc912812 --- /dev/null +++ b/test/fixtures/linode_instances_124_interfaces_123.json @@ -0,0 +1,53 @@ +{ + "created": "2025-01-01T00:01:01", + "default_route": { + "ipv4": true, + "ipv6": true + }, + "id": 123, + "mac_address": "22:00:AB:CD:EF:01", + "public": { + "ipv4": { + "addresses": [ + { + "address": "172.30.0.50", + "primary": true + } + ], + "shared": [ + { + "address": "172.30.0.51", + "linode_id": 125 + } + ] + }, + "ipv6": { + "ranges": [ + { + "range": "2600:3c09:e001:59::/64", + "route_target": "2600:3c09::ff:feab:cdef" + }, + { + "range": "2600:3c09:e001:5a::/64", + "route_target": "2600:3c09::ff:feab:cdef" + } + ], + "shared": [ + { + "range": "2600:3c09:e001:2a::/64", + "route_target": null + } + ], + "slaac": [ + { + "address": "2600:3c09::ff:feab:cdef", + "prefix": 64 + } + ] + } + }, + "updated": "2025-01-01T00:01:01", + "version": 1, + "vlan": null, + "vpc": null +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_124_interfaces_123_firewalls.json b/test/fixtures/linode_instances_124_interfaces_123_firewalls.json new file mode 100644 index 000000000..17a4a9199 --- /dev/null +++ 
b/test/fixtures/linode_instances_124_interfaces_123_firewalls.json @@ -0,0 +1,56 @@ +{ + "data": [ + { + "created": "2018-01-01T00:01:01", + "id": 123, + "label": "firewall123", + "rules": { + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24" + ], + "ipv6": [ + "2001:DB8::/32" + ] + }, + "description": "An example firewall rule description.", + "label": "firewallrule123", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "inbound_policy": "DROP", + "outbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24" + ], + "ipv6": [ + "2001:DB8::/32" + ] + }, + "description": "An example firewall rule description.", + "label": "firewallrule123", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "outbound_policy": "DROP" + }, + "status": "enabled", + "tags": [ + "example tag", + "another example" + ], + "updated": "2018-01-02T00:01:01" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/linode_instances_124_interfaces_456.json b/test/fixtures/linode_instances_124_interfaces_456.json new file mode 100644 index 000000000..8ec4abd3d --- /dev/null +++ b/test/fixtures/linode_instances_124_interfaces_456.json @@ -0,0 +1,42 @@ +{ + "id": 456, + "mac_address": "22:00:AB:CD:EF:01", + "created": "2024-01-01T00:01:01", + "updated": "2024-01-01T00:01:01", + "default_route": { + "ipv4":true + }, + "version": 1, + "vpc": { + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "addresses": [ + { + "address": "192.168.22.3", + "primary": true + } + ], + "ranges": [ + { "range": "192.168.22.16/28"}, + { "range": "192.168.22.32/28"} + ] + }, + "ipv6": { + "is_public": true, + "slaac": [ + { + "range": "1234::/64", + "address": "1234::5678" + } + ], + "ranges": [ + { + "range": "4321::/64" + } + ] + } + }, + "public": null, + "vlan": null +} diff --git a/test/fixtures/linode_instances_124_interfaces_789.json b/test/fixtures/linode_instances_124_interfaces_789.json new file mode 100644 
index 000000000..d533b8e21 --- /dev/null +++ b/test/fixtures/linode_instances_124_interfaces_789.json @@ -0,0 +1,14 @@ +{ + "id": 789, + "mac_address": "22:00:AB:CD:EF:01", + "created": "2024-01-01T00:01:01", + "updated": "2024-01-01T00:01:01", + "default_route": {}, + "version": 1, + "vpc": null, + "public": null, + "vlan": { + "vlan_label": "my_vlan", + "ipam_address": "10.0.0.1/24" + } +} diff --git a/test/fixtures/linode_instances_124_interfaces_settings.json b/test/fixtures/linode_instances_124_interfaces_settings.json new file mode 100644 index 000000000..b454c438e --- /dev/null +++ b/test/fixtures/linode_instances_124_interfaces_settings.json @@ -0,0 +1,16 @@ +{ + "network_helper": true, + "default_route": { + "ipv4_interface_id": 123, + "ipv4_eligible_interface_ids": [ + 123, + 456, + 789 + ], + "ipv6_interface_id": 456, + "ipv6_eligible_interface_ids": [ + 123, + 456 + ] + } +} \ No newline at end of file diff --git a/test/fixtures/linode_instances_124_upgrade-interfaces.json b/test/fixtures/linode_instances_124_upgrade-interfaces.json new file mode 100644 index 000000000..fa1015029 --- /dev/null +++ b/test/fixtures/linode_instances_124_upgrade-interfaces.json @@ -0,0 +1,119 @@ +{ + "dry_run": true, + "config_id": 123, + "interfaces": [ + { + "created": "2025-01-01T00:01:01", + "default_route": { + "ipv4": true, + "ipv6": true + }, + "id": 123, + "mac_address": "22:00:AB:CD:EF:01", + "public": { + "ipv4": { + "addresses": [ + { + "address": "172.30.0.50", + "primary": true + } + ], + "shared": [ + { + "address": "172.30.0.51", + "linode_id": 125 + } + ] + }, + "ipv6": { + "ranges": [ + { + "range": "2600:3c09:e001:59::/64", + "route_target": "2600:3c09::ff:feab:cdef" + }, + { + "range": "2600:3c09:e001:5a::/64", + "route_target": "2600:3c09::ff:feab:cdef" + } + ], + "shared": [ + { + "range": "2600:3c09:e001:2a::/64", + "route_target": null + } + ], + "slaac": [ + { + "address": "2600:3c09::ff:feab:cdef", + "prefix": 64 + } + ] + } + }, + "updated": 
"2025-01-01T00:01:01", + "version": 1, + "vlan": null, + "vpc": null + }, + { + "id": 456, + "mac_address": "22:00:AB:CD:EF:01", + "created": "2024-01-01T00:01:01", + "updated": "2024-01-01T00:01:01", + "default_route": { + "ipv4": true + }, + "version": 1, + "vpc": { + "vpc_id": 123456, + "subnet_id": 789, + "ipv4": { + "addresses": [ + { + "address": "192.168.22.3", + "primary": true + } + ], + "ranges": [ + { + "range": "192.168.22.16/28" + }, + { + "range": "192.168.22.32/28" + } + ] + }, + "ipv6": { + "is_public": true, + "slaac": [ + { + "range": "1234::/64", + "address": "1234::5678" + } + ], + "ranges": [ + { + "range": "4321::/64" + } + ] + } + }, + "public": null, + "vlan": null + }, + { + "id": 789, + "mac_address": "22:00:AB:CD:EF:01", + "created": "2024-01-01T00:01:01", + "updated": "2024-01-01T00:01:01", + "default_route": {}, + "version": 1, + "vpc": null, + "public": null, + "vlan": { + "vlan_label": "my_vlan", + "ipam_address": "10.0.0.1/24" + } + } + ] +} \ No newline at end of file diff --git a/test/fixtures/linode_stackscripts_10079.json b/test/fixtures/linode_stackscripts_10079.json new file mode 100644 index 000000000..bf0fef197 --- /dev/null +++ b/test/fixtures/linode_stackscripts_10079.json @@ -0,0 +1,29 @@ +{ + "created": "2018-01-01T00:01:01", + "deployments_active": 1, + "deployments_total": 12, + "description": "This StackScript installs and configures MySQL\n", + "id": 10079, + "images": [ + "linode/debian9", + "linode/debian8" + ], + "is_public": true, + "label": "a-stackscript", + "mine": true, + "rev_note": "Set up MySQL", + "script": "\"#!/bin/bash\"\n", + "updated": "2018-01-01T00:01:01", + "user_defined_fields": [ + { + "default": null, + "example": "hunter2", + "label": "Enter the password", + "manyOf": "avalue,anothervalue,thirdvalue", + "name": "DB_PASSWORD", + "oneOf": "avalue,anothervalue,thirdvalue" + } + ], + "user_gravatar_id": "a445b305abda30ebc766bc7fda037c37", + "username": "myuser" +} \ No newline at end of file diff 
--git a/test/fixtures/linode_types.json b/test/fixtures/linode_types.json new file mode 100644 index 000000000..dee3209ee --- /dev/null +++ b/test/fixtures/linode_types.json @@ -0,0 +1,230 @@ +{ + "results": 5, + "pages": 1, + "page": 1, + "data": [ + { + "accelerated_devices": 0, + "disk": 20480, + "memory": 1024, + "transfer": 1000, + "addons": { + "backups": { + "price": { + "hourly": 0.003, + "monthly": 2 + }, + "region_prices": [ + { + "id": "ap-west", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ] + } + }, + "class": "nanode", + "network_out": 1000, + "vcpus": 1, + "gpus": 0, + "id": "g6-nanode-1", + "label": "Linode 1024", + "price": { + "hourly": 0.0075, + "monthly": 5 + }, + "region_prices": [ + { + "id": "us-east", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ], + "successor": null + }, + { + "accelerated_devices": 0, + "disk": 20480, + "memory": 16384, + "transfer": 5000, + "addons": { + "backups": { + "price": { + "hourly": 0.008, + "monthly": 5 + }, + "region_prices": [ + { + "id": "ap-west", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ] + } + }, + "class": "highmem", + "network_out": 1000, + "vcpus": 1, + "gpus": 0, + "id": "g5-highmem-1", + "label": "Linode 16384", + "price": { + "hourly": 0.09, + "monthly": 60 + }, + "region_prices": [ + { + "id": "us-east", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ], + "successor": null + }, + { + "accelerated_devices": 0, + "disk": 30720, + "memory": 2048, + "transfer": 2000, + "addons": { + "backups": { + "price": { + "hourly": 0.004, + "monthly": 2.5 + }, + "region_prices": [ + { + "id": "ap-west", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ] + } + }, + "class": "standard", + "network_out": 1000, + 
"vcpus": 1, + "gpus": 0, + "id": "g6-standard-1", + "label": "Linode 2048", + "price": { + "hourly": 0.015, + "monthly": 10 + }, + "region_prices": [ + { + "id": "us-east", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ], + "successor": null + }, + { + "accelerated_devices": 0, + "disk": 49152, + "memory": 4096, + "transfer": 3000, + "addons": { + "backups": { + "price": { + "hourly": 0.008, + "monthly": 5 + }, + "region_prices": [ + { + "id": "ap-west", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ] + } + }, + "class": "gpu", + "network_out": 1000, + "vcpus": 2, + "gpus": 1, + "id": "g6-gpu-2", + "label": "Linode 4096", + "price": { + "hourly": 0.03, + "monthly": 20 + }, + "region_prices": [ + { + "id": "us-east", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ], + "successor": null + }, + { + "id": "g1-accelerated-netint-vpu-t1u1-m", + "label": "Netint Quadra T1U x1 Medium", + "price": { + "hourly": 0.0, + "monthly": 0.0 + }, + "region_prices": [], + "addons": { + "backups": { + "price": { + "hourly": 0.0, + "monthly": 0.0 + }, + "region_prices": [] + } + }, + "memory": 24576, + "disk": 307200, + "transfer": 0, + "vcpus": 12, + "gpus": 0, + "network_out": 16000, + "class": "accelerated", + "successor": null, + "accelerated_devices": 1 + } + ] +} \ No newline at end of file diff --git a/test/fixtures/linode_types_g6-nanode-1.json b/test/fixtures/linode_types_g6-nanode-1.json new file mode 100644 index 000000000..8fc590638 --- /dev/null +++ b/test/fixtures/linode_types_g6-nanode-1.json @@ -0,0 +1,48 @@ +{ + "disk": 20480, + "memory": 1024, + "transfer": 1000, + "addons": { + "backups": { + "price": { + "hourly": 0.003, + "monthly": 2 + }, + "region_prices": [ + { + "id": "ap-west", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } 
+ ] + } + }, + "class": "nanode", + "network_out": 1000, + "vcpus": 1, + "gpus": 0, + "id": "g5-nanode-1", + "label": "Linode 1024", + "price": { + "hourly": 0.0075, + "monthly": 5 + }, + "region_prices": [ + { + "id": "us-east", + "hourly": 0.02, + "monthly": 20 + }, + { + "id": "ap-northeast", + "hourly": 0.02, + "monthly": 20 + } + ], + "successor": null +} diff --git a/test/fixtures/lke_clusters.json b/test/fixtures/lke_clusters.json new file mode 100644 index 000000000..1a932c8ec --- /dev/null +++ b/test/fixtures/lke_clusters.json @@ -0,0 +1,11 @@ +{ + "id": 18881, + "status": "ready", + "created": "2021-02-10T23:54:21", + "updated": "2021-02-10T23:54:21", + "label": "example-cluster", + "region": "ap-west", + "k8s_version": "1.19", + "tags": [], + "apl_enabled": true +} diff --git a/test/fixtures/lke_clusters_18881.json b/test/fixtures/lke_clusters_18881.json new file mode 100644 index 000000000..a520e49ea --- /dev/null +++ b/test/fixtures/lke_clusters_18881.json @@ -0,0 +1,15 @@ +{ + "id": 18881, + "status": "ready", + "created": "2021-02-10T23:54:21", + "updated": "2021-02-10T23:54:21", + "label": "example-cluster", + "region": "ap-west", + "k8s_version": "1.19", + "tier": "standard", + "tags": [], + "control_plane": { + "high_availability": true + }, + "apl_enabled": true +} \ No newline at end of file diff --git a/test/fixtures/lke_clusters_18881_control__plane__acl.json b/test/fixtures/lke_clusters_18881_control__plane__acl.json new file mode 100644 index 000000000..f4da34393 --- /dev/null +++ b/test/fixtures/lke_clusters_18881_control__plane__acl.json @@ -0,0 +1,13 @@ +{ + "acl": { + "enabled": true, + "addresses": { + "ipv4": [ + "10.0.0.1/32" + ], + "ipv6": [ + "1234::5678" + ] + } + } +} \ No newline at end of file diff --git a/test/fixtures/lke_clusters_18881_dashboard.json b/test/fixtures/lke_clusters_18881_dashboard.json new file mode 100644 index 000000000..eb58d587d --- /dev/null +++ b/test/fixtures/lke_clusters_18881_dashboard.json @@ -0,0 +1,3 
@@ +{ + "url": "https://example.dashboard.linodelke.net" +} diff --git a/test/fixtures/lke_clusters_18881_nodes_123456.json b/test/fixtures/lke_clusters_18881_nodes_123456.json new file mode 100644 index 000000000..646b62f5d --- /dev/null +++ b/test/fixtures/lke_clusters_18881_nodes_123456.json @@ -0,0 +1,5 @@ +{ + "id": "123456", + "instance_id": 456, + "status": "ready" + } \ No newline at end of file diff --git a/test/fixtures/lke_clusters_18881_pools_456.json b/test/fixtures/lke_clusters_18881_pools_456.json new file mode 100644 index 000000000..7bf68a6f8 --- /dev/null +++ b/test/fixtures/lke_clusters_18881_pools_456.json @@ -0,0 +1,41 @@ +{ + "autoscaler": { + "enabled": true, + "max": 12, + "min": 3 + }, + "count": 6, + "disks": [ + { + "size": 1024, + "type": "ext-4" + } + ], + "id": 456, + "nodes": [ + { + "id": "123456", + "instance_id": 123458, + "status": "ready" + } + ], + "tags": [ + "example tag", + "another example" + ], + "taints": [ + { + "key": "foo", + "value": "bar", + "effect": "NoSchedule" + } + ], + "labels": { + "foo": "bar", + "bar": "foo" + }, + "label": "example-node-pool", + "firewall_id": 456, + "type": "g6-standard-4", + "disk_encryption": "enabled" + } \ No newline at end of file diff --git a/test/fixtures/lke_clusters_18882.json b/test/fixtures/lke_clusters_18882.json new file mode 100644 index 000000000..49548c018 --- /dev/null +++ b/test/fixtures/lke_clusters_18882.json @@ -0,0 +1,14 @@ +{ + "id": 18881, + "status": "ready", + "created": "2021-02-10T23:54:21", + "updated": "2021-02-10T23:54:21", + "label": "example-cluster-2", + "region": "ap-west", + "k8s_version": "1.31.1+lke1", + "tier": "enterprise", + "tags": [], + "control_plane": { + "high_availability": true + } +} \ No newline at end of file diff --git a/test/fixtures/lke_clusters_18882_pools_789.json b/test/fixtures/lke_clusters_18882_pools_789.json new file mode 100644 index 000000000..8a5ba21d8 --- /dev/null +++ b/test/fixtures/lke_clusters_18882_pools_789.json @@ -0,0 
+1,20 @@ +{ + "id": 789, + "type": "g6-standard-2", + "label": "enterprise-node-pool", + "count": 3, + "nodes": [], + "disks": [], + "autoscaler": { + "enabled": false, + "min": 3, + "max": 3 + }, + "labels": {}, + "taints": [], + "tags": [], + "disk_encryption": "enabled", + "k8s_version": "1.31.1+lke1", + "firewall_id": 789, + "update_strategy": "rolling_update" +} \ No newline at end of file diff --git a/test/fixtures/lke_tiers_standard_versions.json b/test/fixtures/lke_tiers_standard_versions.json new file mode 100644 index 000000000..5dfeeb4ab --- /dev/null +++ b/test/fixtures/lke_tiers_standard_versions.json @@ -0,0 +1,19 @@ +{ + "data": [ + { + "id": "1.32", + "tier": "standard" + }, + { + "id": "1.31", + "tier": "standard" + }, + { + "id": "1.30", + "tier": "standard" + } + ], + "page": 1, + "pages": 1, + "results": 3 +} diff --git a/test/fixtures/lke_types.json b/test/fixtures/lke_types.json new file mode 100644 index 000000000..7d27a7f86 --- /dev/null +++ b/test/fixtures/lke_types.json @@ -0,0 +1,38 @@ +{ + "data": [ + { + "id": "lke-sa", + "label": "LKE Standard Availability", + "price": { + "hourly": 0, + "monthly": 0 + }, + "region_prices": [], + "transfer": 0 + }, + { + "id": "lke-ha", + "label": "LKE High Availability", + "price": { + "hourly": 0.09, + "monthly": 60 + }, + "region_prices": [ + { + "id": "id-cgk", + "hourly": 0.108, + "monthly": 72 + }, + { + "id": "br-gru", + "hourly": 0.126, + "monthly": 84 + } + ], + "transfer": 0 + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/lke_versions.json b/test/fixtures/lke_versions.json new file mode 100644 index 000000000..d4ed71b2b --- /dev/null +++ b/test/fixtures/lke_versions.json @@ -0,0 +1,10 @@ +{ + "data": [ + {"id": "1.19"}, + {"id": "1.18"}, + {"id": "1.17"} + ], + "page": 1, + "pages": 1, + "results": 3 +} diff --git a/test/fixtures/locks.json b/test/fixtures/locks.json new file mode 100644 index 000000000..b84056b6b --- /dev/null +++ 
b/test/fixtures/locks.json @@ -0,0 +1,27 @@ +{ + "data": [ + { + "id": 1, + "lock_type": "cannot_delete", + "entity": { + "id": 123, + "type": "linode", + "label": "test-linode", + "url": "/v4/linode/instances/123" + } + }, + { + "id": 2, + "lock_type": "cannot_delete_with_subresources", + "entity": { + "id": 456, + "type": "linode", + "label": "another-linode", + "url": "/v4/linode/instances/456" + } + } + ], + "page": 1, + "pages": 1, + "results": 2 +} diff --git a/test/fixtures/locks_1.json b/test/fixtures/locks_1.json new file mode 100644 index 000000000..ed7a802bf --- /dev/null +++ b/test/fixtures/locks_1.json @@ -0,0 +1,10 @@ +{ + "id": 1, + "lock_type": "cannot_delete", + "entity": { + "id": 123, + "type": "linode", + "label": "test-linode", + "url": "/v4/linode/instances/123" + } +} diff --git a/test/fixtures/longview_clients.json b/test/fixtures/longview_clients.json new file mode 100644 index 000000000..057ab9e2b --- /dev/null +++ b/test/fixtures/longview_clients.json @@ -0,0 +1,33 @@ +{ + "data": [ + { + "created": "2014-12-01T00:01:01", + "updated": "2016-06-02T00:01:01", + "id": 1234, + "label": "test_client_1", + "install_code": "12345678-ABCD-EF01-23456789ABCDEF12", + "apps": { + "nginx": false, + "mysql": false, + "apache": false + }, + "api_key": "12345678-ABCD-EF01-23456789ABCDEF12" + }, + { + "created": "2017-01-01T06:00:00", + "updated": "2017-01-01T06:00:00", + "id": 5678, + "label": "longview5678", + "install_code": "12345678-ABCD-EF01-23456789ABCDEF12", + "apps": { + "nginx": true, + "mysql": true, + "apache": true + }, + "api_key": "12345678-ABCD-EF01-23456789ABCDEF12" + } + ], + "results": 2, + "pages": 1, + "page": 1 +} diff --git a/test/fixtures/longview_plan.json b/test/fixtures/longview_plan.json new file mode 100644 index 000000000..f5f8503b2 --- /dev/null +++ b/test/fixtures/longview_plan.json @@ -0,0 +1,9 @@ +{ + "clients_included": 10, + "id": "longview-10", + "label": "Longview Pro 10 pack", + "price": { + "hourly": 0.06, + 
"monthly": 40 + } +} \ No newline at end of file diff --git a/test/fixtures/longview_subscriptions.json b/test/fixtures/longview_subscriptions.json new file mode 100644 index 000000000..dbd753537 --- /dev/null +++ b/test/fixtures/longview_subscriptions.json @@ -0,0 +1,43 @@ +{ + "page": 1, + "results": 4, + "data": [ + { + "id": "longview-10", + "label": "Longview Pro 10 pack", + "clients_included": 10, + "price": { + "hourly": 0.06, + "monthly": 40 + } + }, + { + "id": "longview-100", + "label": "Longview Pro 100 pack", + "clients_included": 100, + "price": { + "hourly": 0.3, + "monthly": 200 + } + }, + { + "id": "longview-3", + "label": "Longview Pro 3 pack", + "clients_included": 3, + "price": { + "hourly": 0.03, + "monthly": 20 + } + }, + { + "id": "longview-40", + "label": "Longview Pro 40 pack", + "clients_included": 40, + "price": { + "hourly": 0.15, + "monthly": 100 + } + } + ], + "pages": 1 +} diff --git a/test/fixtures/maintenance_policies.json b/test/fixtures/maintenance_policies.json new file mode 100644 index 000000000..409255a07 --- /dev/null +++ b/test/fixtures/maintenance_policies.json @@ -0,0 +1,28 @@ +{ + "data": [ + { + "slug": "linode/migrate", + "label": "Migrate", + "description": "Migrates the Linode to a new host while it remains fully operational. Recommended for maximizing availability.", + "type": "migrate", + "notification_period_sec": 3600, + "is_default": true + }, + { + "slug": "linode/power_off_on", + "label": "Power Off/Power On", + "description": "Powers off the Linode at the start of the maintenance event and reboots it once the maintenance finishes. 
Recommended for maximizing performance.", + "type": "power_off_on", + "notification_period_sec": 1800, + "is_default": false + }, + { + "slug": "private/12345", + "label": "Critical Workload - Avoid Migration", + "description": "Custom policy designed to power off and perform maintenance during user-defined windows only.", + "type": "power_off_on", + "notification_period_sec": 7200, + "is_default": false + } + ] +} \ No newline at end of file diff --git a/test/fixtures/mongodb.json b/test/fixtures/mongodb.json new file mode 100644 index 000000000..a331c5cd6 --- /dev/null +++ b/test/fixtures/mongodb.json @@ -0,0 +1,3 @@ +{ + "ca_certificate": "LS0tLS1CRUdJ...==" +} \ No newline at end of file diff --git a/test/fixtures/monitor_alert-definitions.json b/test/fixtures/monitor_alert-definitions.json new file mode 100644 index 000000000..92b6e0e4c --- /dev/null +++ b/test/fixtures/monitor_alert-definitions.json @@ -0,0 +1,26 @@ +{ + "data": [ + { + "id": 12345, + "label": "Test Alert for DBAAS", + "service_type": "dbaas", + "severity": 1, + "type": "user", + "description": "A test alert for dbaas service", + "entity_ids": ["13217"], + "alert_channels": [], + "has_more_resources": false, + "rule_criteria": null, + "trigger_conditions": null, + "class": "alert", + "notification_groups": [], + "status": "active", + "created": "2024-01-01T00:00:00", + "updated": "2024-01-01T00:00:00", + "updated_by": "tester" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/monitor_dashboards.json b/test/fixtures/monitor_dashboards.json new file mode 100644 index 000000000..5e56923a1 --- /dev/null +++ b/test/fixtures/monitor_dashboards.json @@ -0,0 +1,41 @@ +{ + "data": [ + { + "created": "2024-10-10T05:01:58", + "id": 1, + "label": "Resource Usage", + "service_type": "dbaas", + "type": "standard", + "updated": "2024-10-10T05:01:58", + "widgets": [ + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "CPU Usage", + 
"metric": "cpu_usage", + "size": 12, + "unit": "%", + "y_label": "cpu_usage", + "group_by": ["entity_id"], + "filters": null + }, + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "Disk I/O Write", + "metric": "write_iops", + "size": 6, + "unit": "IOPS", + "y_label": "write_iops", + "group_by": ["entity_id"], + "filters": null + } + ] + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/monitor_dashboards_1.json b/test/fixtures/monitor_dashboards_1.json new file mode 100644 index 000000000..afb5d71ee --- /dev/null +++ b/test/fixtures/monitor_dashboards_1.json @@ -0,0 +1,34 @@ +{ + "created": "2024-10-10T05:01:58", + "id": 1, + "label": "Resource Usage", + "service_type": "dbaas", + "type": "standard", + "updated": "2024-10-10T05:01:58", + "widgets": [ + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "CPU Usage", + "metric": "cpu_usage", + "size": 12, + "unit": "%", + "y_label": "cpu_usage", + "group_by": ["entity_id"], + "filters": null + }, + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "Available Memory", + "metric": "available_memory", + "size": 6, + "unit": "GB", + "y_label": "available_memory", + "group_by": ["entity_id"], + "filters": null + } + ] + } \ No newline at end of file diff --git a/test/fixtures/monitor_services.json b/test/fixtures/monitor_services.json new file mode 100644 index 000000000..7a568866c --- /dev/null +++ b/test/fixtures/monitor_services.json @@ -0,0 +1,11 @@ +{ + "data": [ + { + "label": "Databases", + "service_type": "dbaas" + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas.json b/test/fixtures/monitor_services_dbaas.json new file mode 100644 index 000000000..211833847 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas.json @@ -0,0 +1,15 @@ +{ + "service_type": 
"dbaas", + "label": "Databases", + "alert": { + "polling_interval_seconds": [ + 300 + ], + "evaluation_period_seconds": [ + 300 + ], + "scope": [ + "entity" + ] + } +} \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas_alert-definitions.json b/test/fixtures/monitor_services_dbaas_alert-definitions.json new file mode 100644 index 000000000..0c7067a8a --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_alert-definitions.json @@ -0,0 +1,52 @@ +{ + "data": [ + { + "id": 12345, + "label": "Test Alert for DBAAS", + "service_type": "dbaas", + "severity": 1, + "type": "user", + "description": "A test alert for dbaas service", + "entity_ids": [ + "13217" + ], + "alert_channels": [], + "has_more_resources": false, + "rule_criteria": { + "rules": [ + { + "aggregate_function": "avg", + "dimension_filters": [ + { + "dimension_label": "node_type", + "label": "Node Type", + "operator": "eq", + "value": "primary" + } + ], + "label": "High CPU Usage", + "metric": "cpu_usage", + "operator": "gt", + "threshold": 90, + "unit": "percent" + } + ] + }, + "trigger_conditions": { + "criteria_condition": "ALL", + "evaluation_period_seconds": 300, + "polling_interval_seconds": 60, + "trigger_occurrences": 3 + }, + "class": "alert", + "notification_groups": [], + "status": "active", + "created": "2024-01-01T00:00:00", + "updated": "2024-01-01T00:00:00", + "updated_by": "tester" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/monitor_services_dbaas_alert-definitions_12345.json b/test/fixtures/monitor_services_dbaas_alert-definitions_12345.json new file mode 100644 index 000000000..822e18b24 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_alert-definitions_12345.json @@ -0,0 +1,44 @@ +{ + "id": 12345, + "label": "Test Alert for DBAAS", + "service_type": "dbaas", + "severity": 1, + "type": "user", + "description": "A test alert for dbaas service", + "entity_ids": [ + "13217" + ], + "alert_channels": [], + 
"has_more_resources": false, + "rule_criteria": { + "rules": [ + { + "aggregate_function": "avg", + "dimension_filters": [ + { + "dimension_label": "node_type", + "label": "Node Type", + "operator": "eq", + "value": "primary" + } + ], + "label": "High CPU Usage", + "metric": "cpu_usage", + "operator": "gt", + "threshold": 90, + "unit": "percent" + } + ] + }, + "trigger_conditions": { + "criteria_condition": "ALL", + "evaluation_period_seconds": 300, + "polling_interval_seconds": 60, + "trigger_occurrences": 3 + }, + "class": "alert", + "status": "active", + "created": "2024-01-01T00:00:00", + "updated": "2024-01-01T00:00:00", + "updated_by": "tester" +} diff --git a/test/fixtures/monitor_services_dbaas_dashboards.json b/test/fixtures/monitor_services_dbaas_dashboards.json new file mode 100644 index 000000000..e39a231b2 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_dashboards.json @@ -0,0 +1,48 @@ +{ + "data": [ + { + "created": "2024-10-10T05:01:58", + "id": 1, + "label": "Resource Usage", + "service_type": "dbaas", + "type": "standard", + "updated": "2024-10-10T05:01:58", + "widgets": [ + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "CPU Usage", + "metric": "cpu_usage", + "size": 12, + "unit": "%", + "y_label": "cpu_usage", + "group_by": ["entity_id"], + "filters": null + }, + { + "aggregate_function": "sum", + "chart_type": "area", + "color": "default", + "label": "Memory Usage", + "metric": "memory_usage", + "size": 6, + "unit": "%", + "y_label": "memory_usage", + "group_by": ["entity_id"], + "filters": [ + { + "dimension_label": "pattern", + "operator": "in", + "value": "publicout,privateout" + } + ] + + } + ] + } + ], + "page": 1, + "pages": 1, + "results": 1 + } \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas_metric-definitions.json b/test/fixtures/monitor_services_dbaas_metric-definitions.json new file mode 100644 index 000000000..c493b23a3 --- /dev/null +++ 
b/test/fixtures/monitor_services_dbaas_metric-definitions.json @@ -0,0 +1,55 @@ +{ + "data": [ + { + "available_aggregate_functions": [ + "max", + "avg", + "min", + "sum" + ], + "dimensions": [ + { + "dimension_label": "node_type", + "label": "Node Type", + "values": [ + "primary", + "secondary" + ] + } + ], + "is_alertable": true, + "label": "CPU Usage", + "metric": "cpu_usage", + "metric_type": "gauge", + "scrape_interval": "60s", + "unit": "percent" + }, + { + "available_aggregate_functions": [ + "max", + "avg", + "min", + "sum" + ], + "dimensions": [ + { + "dimension_label": "node_type", + "label": "Node Type", + "values": [ + "primary", + "secondary" + ] + } + ], + "is_alertable": true, + "label": "Disk I/O Read", + "metric": "read_iops", + "metric_type": "gauge", + "scrape_interval": "60s", + "unit": "iops" + } + ], + "page": 1, + "pages": 1, + "results": 2 + } \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas_metrics.json b/test/fixtures/monitor_services_dbaas_metrics.json new file mode 100644 index 000000000..67657cb78 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_metrics.json @@ -0,0 +1,47 @@ +{ + "data": { + "result": [ + { + "metric": { + "entity_id": 13316, + "metric_name": "avg_read_iops", + "node_id": "primary-9" + }, + "values": [ + [ + 1728996500, + "90.55555555555556" + ], + [ + 1729043400, + "14890.583333333334" + ] + ] + }, + { + "metric": { + "entity_id": 13217, + "metric_name": "avg_cpu_usage", + "node_id": "primary-0" + }, + "values": [ + [ + 1728996500, + "12.45" + ], + [ + 1729043400, + "18.67" + ] + ] + } + ], + "resultType": "matrix" + }, + "isPartial": false, + "stats": { + "executionTimeMsec": 21, + "seriesFetched": "2" + }, + "status": "success" +} \ No newline at end of file diff --git a/test/fixtures/monitor_services_dbaas_token.json b/test/fixtures/monitor_services_dbaas_token.json new file mode 100644 index 000000000..b1aa0d786 --- /dev/null +++ b/test/fixtures/monitor_services_dbaas_token.json 
@@ -0,0 +1,3 @@ +{ + "token": "abcdefhjigkfghh" +} \ No newline at end of file diff --git a/test/fixtures/monitor_services_linode_token.json b/test/fixtures/monitor_services_linode_token.json new file mode 100644 index 000000000..b1aa0d786 --- /dev/null +++ b/test/fixtures/monitor_services_linode_token.json @@ -0,0 +1,3 @@ +{ + "token": "abcdefhjigkfghh" +} \ No newline at end of file diff --git a/test/fixtures/network-transfer_prices.json b/test/fixtures/network-transfer_prices.json new file mode 100644 index 000000000..d595864ef --- /dev/null +++ b/test/fixtures/network-transfer_prices.json @@ -0,0 +1,38 @@ +{ + "data": [ + { + "id": "distributed_network_transfer", + "label": "Distributed Network Transfer", + "price": { + "hourly": 0.01, + "monthly": null + }, + "region_prices": [], + "transfer": 0 + }, + { + "id": "network_transfer", + "label": "Network Transfer", + "price": { + "hourly": 0.005, + "monthly": null + }, + "region_prices": [ + { + "id": "id-cgk", + "hourly": 0.015, + "monthly": null + }, + { + "id": "br-gru", + "hourly": 0.007, + "monthly": null + } + ], + "transfer": 0 + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls.json b/test/fixtures/networking_firewalls.json new file mode 100644 index 000000000..0bd9660f1 --- /dev/null +++ b/test/fixtures/networking_firewalls.json @@ -0,0 +1,21 @@ +{ + "data":[ + { + "id":123, + "label":"test-firewall-1", + "created":"2018-01-01T00:01:01", + "updated":"2018-01-01T00:01:01", + "status":"enabled", + "rules":{ + "outbound":[], + "outbound_policy":"DROP", + "inbound":[], + "inbound_policy":"DROP" + }, + "tags":[] + } + ], + "page":1, + "pages":1, + "results":1 +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_123.json b/test/fixtures/networking_firewalls_123.json new file mode 100644 index 000000000..c34a3991e --- /dev/null +++ b/test/fixtures/networking_firewalls_123.json @@ -0,0 +1,14 @@ +{ + "id":123, 
+ "label":"test-firewall-1", + "created":"2018-01-01T00:01:01", + "updated":"2018-01-01T00:01:01", + "status":"enabled", + "rules":{ + "outbound":[], + "outbound_policy":"DROP", + "inbound":[], + "inbound_policy":"DROP" + }, + "tags":[] +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_123_devices.json b/test/fixtures/networking_firewalls_123_devices.json new file mode 100644 index 000000000..e43e3725a --- /dev/null +++ b/test/fixtures/networking_firewalls_123_devices.json @@ -0,0 +1,29 @@ +{ + "data": [ + { + "created": "2018-01-01T00:01:01", + "entity": { + "id": 123, + "label": "my-linode", + "type": "linode", + "url": "/v4/linode/instances/123" + }, + "id": 123, + "updated": "2018-01-02T00:01:01" + }, + { + "created": "2018-01-01T00:01:01", + "entity": { + "id": 123, + "label": null, + "type": "interface", + "url": "/v4/linode/instances/123/interfaces/123" + }, + "id": 456, + "updated": "2018-01-02T00:01:01" + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_123_devices_123.json b/test/fixtures/networking_firewalls_123_devices_123.json new file mode 100644 index 000000000..ce536c684 --- /dev/null +++ b/test/fixtures/networking_firewalls_123_devices_123.json @@ -0,0 +1,11 @@ +{ + "created":"2018-01-01T00:01:01", + "entity":{ + "id":123, + "label":"my-linode", + "type":"linode", + "url":"/v4/linode/instances/123" + }, + "id":123, + "updated":"2018-01-02T00:01:01" +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_123_devices_456.json b/test/fixtures/networking_firewalls_123_devices_456.json new file mode 100644 index 000000000..aa76901ee --- /dev/null +++ b/test/fixtures/networking_firewalls_123_devices_456.json @@ -0,0 +1,11 @@ +{ + "created": "2018-01-01T00:01:01", + "entity": { + "id": 123, + "label": null, + "type": "interface", + "url": "/v4/linode/instances/123/interfaces/123" + }, + "id": 456, + "updated": 
"2018-01-02T00:01:01" +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_123_history.json b/test/fixtures/networking_firewalls_123_history.json new file mode 100644 index 000000000..13f2b0df7 --- /dev/null +++ b/test/fixtures/networking_firewalls_123_history.json @@ -0,0 +1,21 @@ +{ + "data": [ + { + "updated": "2025-03-07T17:06:36", + "status": "enabled", + "rules": { + "version": 1 + } + }, + { + "updated": "2025-03-07T17:06:36", + "status": "enabled", + "rules": { + "version": 2 + } + } + ], + "page": 1, + "pages": 1, + "results": 2 +} diff --git a/test/fixtures/networking_firewalls_123_history_rules_2.json b/test/fixtures/networking_firewalls_123_history_rules_2.json new file mode 100644 index 000000000..3819436f8 --- /dev/null +++ b/test/fixtures/networking_firewalls_123_history_rules_2.json @@ -0,0 +1,24 @@ +{ + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "0.0.0.0/0" + ], + "ipv6": [ + "ff00::/8" + ] + }, + "description": "A really cool firewall rule.", + "label": "really-cool-firewall-rule", + "ports": "80", + "protocol": "TCP" + } + ], + "inbound_policy": "ACCEPT", + "outbound": [], + "outbound_policy": "DROP", + "version": 2, + "fingerprint": "96c9568c" +} diff --git a/test/fixtures/networking_firewalls_123_rules.json b/test/fixtures/networking_firewalls_123_rules.json new file mode 100644 index 000000000..43c8af4dc --- /dev/null +++ b/test/fixtures/networking_firewalls_123_rules.json @@ -0,0 +1,6 @@ +{ + "inbound": [], + "inbound_policy": "DROP", + "outbound": [], + "outbound_policy": "DROP" +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_settings.json b/test/fixtures/networking_firewalls_settings.json new file mode 100644 index 000000000..bfb7b2853 --- /dev/null +++ b/test/fixtures/networking_firewalls_settings.json @@ -0,0 +1,8 @@ +{ + "default_firewall_ids": { + "vpc_interface": 123, + "public_interface": 456, + "linode": 789, + "nodebalancer": 321 + } +} \ No newline 
at end of file diff --git a/test/fixtures/networking_firewalls_templates.json b/test/fixtures/networking_firewalls_templates.json new file mode 100644 index 000000000..b0267c7b4 --- /dev/null +++ b/test/fixtures/networking_firewalls_templates.json @@ -0,0 +1,93 @@ +{ + "data": [ + { + "slug": "public", + "rules": { + "outbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + "198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "outbound_policy": "DROP", + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + "198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "inbound_policy": "DROP" + } + }, + { + "slug": "vpc", + "rules": { + "outbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + "198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "outbound_policy": "DROP", + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + "198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "inbound_policy": "DROP" + } + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_templates_public.json b/test/fixtures/networking_firewalls_templates_public.json new file mode 100644 index 000000000..6b33e9f73 --- /dev/null +++ b/test/fixtures/networking_firewalls_templates_public.json @@ -0,0 +1,43 @@ +{ + "slug": "public", + "rules": { + "outbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + 
"198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "outbound_policy": "DROP", + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + "198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "inbound_policy": "DROP" + } +} \ No newline at end of file diff --git a/test/fixtures/networking_firewalls_templates_vpc.json b/test/fixtures/networking_firewalls_templates_vpc.json new file mode 100644 index 000000000..839bd6824 --- /dev/null +++ b/test/fixtures/networking_firewalls_templates_vpc.json @@ -0,0 +1,43 @@ +{ + "slug": "vpc", + "rules": { + "outbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + "198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "outbound_policy": "DROP", + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24", + "198.51.100.2/32" + ], + "ipv6": [ + "2001:DB8::/128" + ] + }, + "description": "test", + "label": "test-rule", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "inbound_policy": "DROP" + } +} \ No newline at end of file diff --git a/test/fixtures/networking_ips_127.0.0.1.json b/test/fixtures/networking_ips_127.0.0.1.json new file mode 100644 index 000000000..7abb0fabd --- /dev/null +++ b/test/fixtures/networking_ips_127.0.0.1.json @@ -0,0 +1,17 @@ +{ + "address": "127.0.0.1", + "gateway": "127.0.0.1", + "linode_id": 123, + "interface_id": 456, + "prefix": 24, + "public": true, + "rdns": "test.example.org", + "region": "us-east", + "subnet_mask": "255.255.255.0", + "type": "ipv4", + "vpc_nat_1_1": { + "vpc_id": 242, + "subnet_id": 194, + "address": "139.144.244.36" + } +} \ No 
newline at end of file diff --git a/test/fixtures/networking_ipv6_pools.json b/test/fixtures/networking_ipv6_pools.json new file mode 100644 index 000000000..aef9311e4 --- /dev/null +++ b/test/fixtures/networking_ipv6_pools.json @@ -0,0 +1,13 @@ +{ + "data": [ + { + "prefix": 124, + "range": "2600:3c01::2:5000:0", + "region": "us-east", + "route_target": "2600:3c01::2:5000:f" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/networking_ipv6_ranges.json b/test/fixtures/networking_ipv6_ranges.json new file mode 100644 index 000000000..589ae42e0 --- /dev/null +++ b/test/fixtures/networking_ipv6_ranges.json @@ -0,0 +1,13 @@ +{ + "data": [ + { + "prefix": 64, + "range": "2600:3c01::", + "region": "us-east", + "route_target": "2600:3c01::ffff:ffff:ffff:ffff" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/networking_ipv6_ranges_2600%3A3c01%3A%3A.json b/test/fixtures/networking_ipv6_ranges_2600%3A3c01%3A%3A.json new file mode 100644 index 000000000..7e7983a12 --- /dev/null +++ b/test/fixtures/networking_ipv6_ranges_2600%3A3c01%3A%3A.json @@ -0,0 +1,9 @@ +{ + "is_bgp": false, + "linodes": [ + 123 + ], + "prefix": 64, + "range": "2600:3c01::", + "region": "us-east" +} \ No newline at end of file diff --git a/test/fixtures/networking_vlans.json b/test/fixtures/networking_vlans.json new file mode 100644 index 000000000..c42094777 --- /dev/null +++ b/test/fixtures/networking_vlans.json @@ -0,0 +1,16 @@ +{ + "data": [ + { + "created": "2020-01-01T00:01:01", + "label": "vlan-test", + "linodes": [ + 111, + 222 + ], + "region": "us-southeast" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/nodebalancers.json b/test/fixtures/nodebalancers.json new file mode 100644 index 000000000..9b4dc8dae --- /dev/null +++ b/test/fixtures/nodebalancers.json @@ -0,0 +1,33 @@ +{ + "data": [ + { + "created": "2018-01-01T00:01:01", + "ipv6": "c001:d00d:b01::1:abcd:1234", + 
"region": "us-east-1a", + "ipv4": "12.34.56.789", + "hostname": "nb-12-34-56-789.newark.nodebalancer.linode.com", + "id": 123456, + "updated": "2018-01-01T00:01:01", + "label": "balancer123456", + "client_conn_throttle": 0, + "tags": ["something"], + "locks": ["cannot_delete_with_subresources"] + }, + { + "created": "2018-01-01T00:01:01", + "ipv6": "c001:d00d:b01::1:abcd:1256", + "region": "us-east-1a", + "ipv4": "12.34.56.890", + "hostname": "nb-12-34-56-890.newark.nodebalancer.linode.com", + "id": 123457, + "updated": "2018-01-01T00:01:01", + "label": "balancer123457", + "client_conn_throttle": 0, + "tags": [], + "locks": [] + } + ], + "results": 2, + "page": 1, + "pages": 1 +} diff --git a/test/fixtures/nodebalancers_123456.json b/test/fixtures/nodebalancers_123456.json new file mode 100644 index 000000000..a78c8d3e3 --- /dev/null +++ b/test/fixtures/nodebalancers_123456.json @@ -0,0 +1,17 @@ +{ + "created": "2018-01-01T00:01:01", + "ipv6": "c001:d00d:b01::1:abcd:1234", + "region": "us-east-1a", + "ipv4": "12.34.56.789", + "hostname": "nb-12-34-56-789.newark.nodebalancer.linode.com", + "id": 123456, + "updated": "2018-01-01T00:01:01", + "label": "balancer123456", + "client_conn_throttle": 0, + "tags": [ + "something" + ], + "locks": [ + "cannot_delete_with_subresources" + ] +} \ No newline at end of file diff --git a/test/fixtures/nodebalancers_123456_configs.json b/test/fixtures/nodebalancers_123456_configs.json new file mode 100644 index 000000000..cab9fb981 --- /dev/null +++ b/test/fixtures/nodebalancers_123456_configs.json @@ -0,0 +1,58 @@ +{ + "data": [ + { + "check": "connection", + "check_attempts": 2, + "stickiness": "table", + "check_interval": 5, + "check_body": "", + "id": 65432, + "check_passive": true, + "algorithm": "roundrobin", + "check_timeout": 3, + "check_path": "/", + "ssl_cert": null, + "ssl_commonname": "", + "port": 80, + "nodebalancer_id": 123456, + "cipher_suite": "recommended", + "ssl_key": null, + "nodes_status": { + "up": 0, + "down": 
0 + }, + "protocol": "http", + "ssl_fingerprint": "", + "proxy_protocol": "none" + }, + { + "check": "connection", + "check_attempts": 2, + "stickiness": "table", + "check_interval": 5, + "check_body": "", + "id": 65431, + "check_passive": true, + "algorithm": "roundrobin", + "check_timeout": 3, + "check_path": "/", + "ssl_cert": null, + "ssl_commonname": "", + "port": 80, + "nodebalancer_id": 123456, + "cipher_suite": "none", + "ssl_key": null, + "nodes_status": { + "up": 0, + "down": 0 + }, + "protocol": "udp", + "ssl_fingerprint": "", + "proxy_protocol": "none", + "udp_check_port": 12345 + } + ], + "results": 2, + "page": 1, + "pages": 1 +} diff --git a/test/fixtures/nodebalancers_123456_configs_65432_nodes.json b/test/fixtures/nodebalancers_123456_configs_65432_nodes.json new file mode 100644 index 000000000..f8ffd9edf --- /dev/null +++ b/test/fixtures/nodebalancers_123456_configs_65432_nodes.json @@ -0,0 +1,27 @@ +{ + "data": [ + { + "id": 54321, + "address": "192.168.210.120", + "label": "node54321", + "status": "UP", + "weight": 50, + "mode": "accept", + "config_id": 54321, + "nodebalancer_id": 123456 + }, + { + "id": 12345, + "address": "192.168.210.120", + "label": "node12345", + "status": "UP", + "weight": 50, + "mode": "none", + "config_id": 123456, + "nodebalancer_id": 123456 + } + ], + "pages": 1, + "page": 1, + "results": 2 +} diff --git a/test/fixtures/nodebalancers_12345_configs_4567_rebuild.json b/test/fixtures/nodebalancers_12345_configs_4567_rebuild.json new file mode 100644 index 000000000..d4b6f0cbe --- /dev/null +++ b/test/fixtures/nodebalancers_12345_configs_4567_rebuild.json @@ -0,0 +1,25 @@ +{ + "algorithm": "roundrobin", + "check": "http_body", + "check_attempts": 3, + "check_body": "it works", + "check_interval": 90, + "check_passive": true, + "check_path": "/test", + "check_timeout": 10, + "cipher_suite": "recommended", + "id": 4567, + "nodebalancer_id": 12345, + "nodes_status": { + "down": 0, + "up": 4 + }, + "port": 80, + "protocol": 
"http", + "proxy_protocol": "none", + "ssl_cert": "", + "ssl_commonname": "www.example.com", + "ssl_fingerprint": "00:01:02:03:04:05:06:07:08:09:0A:0B:0C:0D:0E:0F:10:11:12:13", + "ssl_key": "", + "stickiness": "http_cookie" +} \ No newline at end of file diff --git a/test/fixtures/nodebalancers_12345_firewalls.json b/test/fixtures/nodebalancers_12345_firewalls.json new file mode 100644 index 000000000..17a4a9199 --- /dev/null +++ b/test/fixtures/nodebalancers_12345_firewalls.json @@ -0,0 +1,56 @@ +{ + "data": [ + { + "created": "2018-01-01T00:01:01", + "id": 123, + "label": "firewall123", + "rules": { + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24" + ], + "ipv6": [ + "2001:DB8::/32" + ] + }, + "description": "An example firewall rule description.", + "label": "firewallrule123", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "inbound_policy": "DROP", + "outbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": [ + "192.0.2.0/24" + ], + "ipv6": [ + "2001:DB8::/32" + ] + }, + "description": "An example firewall rule description.", + "label": "firewallrule123", + "ports": "22-24, 80, 443", + "protocol": "TCP" + } + ], + "outbound_policy": "DROP" + }, + "status": "enabled", + "tags": [ + "example tag", + "another example" + ], + "updated": "2018-01-02T00:01:01" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} diff --git a/test/fixtures/nodebalancers_12345_stats.json b/test/fixtures/nodebalancers_12345_stats.json new file mode 100644 index 000000000..5e1824609 --- /dev/null +++ b/test/fixtures/nodebalancers_12345_stats.json @@ -0,0 +1,16 @@ +{ + "data": { + "connections": [ + null + ], + "traffic": { + "in": [ + null + ], + "out": [ + null + ] + } + }, + "title": "linode.com - balancer12345 (12345) - day (5 min avg)" +} \ No newline at end of file diff --git a/test/fixtures/nodebalancers_types.json b/test/fixtures/nodebalancers_types.json new file mode 100644 index 000000000..9e5d3fa53 --- /dev/null +++ 
b/test/fixtures/nodebalancers_types.json @@ -0,0 +1,28 @@ +{ + "data": [ + { + "id": "nodebalancer", + "label": "NodeBalancer", + "price": { + "hourly": 0.015, + "monthly": 10 + }, + "region_prices": [ + { + "id": "id-cgk", + "hourly": 0.018, + "monthly": 12 + }, + { + "id": "br-gru", + "hourly": 0.021, + "monthly": 14 + } + ], + "transfer": 0 + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets.json b/test/fixtures/object-storage_buckets.json new file mode 100644 index 000000000..f99a944a6 --- /dev/null +++ b/test/fixtures/object-storage_buckets.json @@ -0,0 +1,15 @@ +{ + "data": [ + { + "cluster": "us-east-1", + "created": "2019-01-01T01:23:45", + "hostname": "example-bucket.us-east-1.linodeobjects.com", + "label": "example-bucket", + "objects": 4, + "size": 188318981 + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets_us-east-1.json b/test/fixtures/object-storage_buckets_us-east-1.json new file mode 100644 index 000000000..f1479dabb --- /dev/null +++ b/test/fixtures/object-storage_buckets_us-east-1.json @@ -0,0 +1,17 @@ +{ + "data": [ + { + "cluster": "us-east-1", + "created": "2019-01-01T01:23:45", + "hostname": "example-bucket.us-east-1.linodeobjects.com", + "label": "example-bucket", + "objects": 4, + "size": 188318981, + "endpoint_type": "E1", + "s3_endpoint": "us-east-12.linodeobjects.com" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets_us-east-1_example-bucket.json b/test/fixtures/object-storage_buckets_us-east-1_example-bucket.json new file mode 100644 index 000000000..c9c6344ee --- /dev/null +++ b/test/fixtures/object-storage_buckets_us-east-1_example-bucket.json @@ -0,0 +1,11 @@ +{ + "cluster": "us-east-1", + "region": "us-east", + "created": "2019-01-01T01:23:45", + "hostname": 
"example-bucket.us-east-1.linodeobjects.com", + "label": "example-bucket", + "objects": 4, + "size": 188318981, + "endpoint_type": "E1", + "s3_endpoint": "us-east-12.linodeobjects.com" +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-acl.json b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-acl.json new file mode 100644 index 000000000..a9b9aaf34 --- /dev/null +++ b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-acl.json @@ -0,0 +1,4 @@ +{ + "acl": "public-read", + "acl_xml": "..." +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-list.json b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-list.json new file mode 100644 index 000000000..6d92be5e0 --- /dev/null +++ b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-list.json @@ -0,0 +1,13 @@ +{ + "data": [ + { + "etag": "9f254c71e28e033bf9e0e5262e3e72ab", + "is_truncated": true, + "last_modified": "2019-01-01T01:23:45", + "name": "example", + "next_marker": "bd021c21-e734-4823-97a4-58b41c2cd4c8.892602.184", + "owner": "bfc70ab2-e3d4-42a4-ad55-83921822270c", + "size": 123 + } + ] +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-url.json b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-url.json new file mode 100644 index 000000000..de617779b --- /dev/null +++ b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_object-url.json @@ -0,0 +1,3 @@ +{ + "url": "https://us-east-1.linodeobjects.com/example-bucket/example?Signature=qr98TEucCntPgEG%2BsZQGDsJg93c%3D&Expires=1567609905&AWSAccessKeyId=G4YAF81XWY61DQM94SE0" +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets_us-east-1_example-bucket_ssl.json b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_ssl.json new file mode 100644 
index 000000000..e16ebc332 --- /dev/null +++ b/test/fixtures/object-storage_buckets_us-east-1_example-bucket_ssl.json @@ -0,0 +1,3 @@ +{ + "ssl": true +} \ No newline at end of file diff --git a/test/fixtures/object-storage_buckets_us-east_example-bucket_access.json b/test/fixtures/object-storage_buckets_us-east_example-bucket_access.json new file mode 100644 index 000000000..852803146 --- /dev/null +++ b/test/fixtures/object-storage_buckets_us-east_example-bucket_access.json @@ -0,0 +1,6 @@ +{ + "acl": "authenticated-read", + "acl_xml": "..." +} \ No newline at end of file diff --git a/test/fixtures/object-storage_clusters.json b/test/fixtures/object-storage_clusters.json new file mode 100644 index 000000000..5df618ea2 --- /dev/null +++ b/test/fixtures/object-storage_clusters.json @@ -0,0 +1,14 @@ +{ + "pages": 1, + "page": 1, + "data": [ + { + "id": "us-east-1", + "status": "available", + "static_site_domain": "website-us-east-1.linodeobjects.com", + "region": "us-east", + "domain": "us-east-1.linodeobjects.com" + } + ], + "results": 1 +} diff --git a/test/fixtures/object-storage_keys.json b/test/fixtures/object-storage_keys.json new file mode 100644 index 000000000..0a9181658 --- /dev/null +++ b/test/fixtures/object-storage_keys.json @@ -0,0 +1,45 @@ +{ + "results": 2, + "pages": 1, + "data": [ + { + "id": 1, + "label": "object-storage-key-1", + "secret_key": "[REDACTED]", + "access_key": "testAccessKeyHere123", + "limited": false, + "regions": [ + { + "id": "us-east", + "s3_endpoint": "us-east-1.linodeobjects.com" + }, + { + "id": "us-west", + "s3_endpoint": "us-west-123.linodeobjects.com" + } + ] + }, + { + "id": 2, + "label": "object-storage-key-2", + "secret_key": "[REDACTED]", + "access_key": "testAccessKeyHere456", + "limited": true, + "bucket_access": [ + { + "cluster": "us-mia-1", + "bucket_name": "example-bucket", + "permissions": "read_only", + "region": "us-mia" + } + ], + "regions": [ + { + "id": "us-mia", + "s3_endpoint": 
"us-mia-1.linodeobjects.com" + } + ] + } + ], + "page": 1 +} \ No newline at end of file diff --git a/test/fixtures/object-storage_quotas.json b/test/fixtures/object-storage_quotas.json new file mode 100644 index 000000000..e831d7303 --- /dev/null +++ b/test/fixtures/object-storage_quotas.json @@ -0,0 +1,25 @@ +{ + "data": [ + { + "quota_id": "obj-objects-us-ord-1", + "quota_name": "Object Storage Maximum Objects", + "description": "Maximum number of Objects this customer is allowed to have on this endpoint.", + "endpoint_type": "E1", + "s3_endpoint": "us-iad-1.linodeobjects.com", + "quota_limit": 50, + "resource_metric": "object" + }, + { + "quota_id": "obj-bucket-us-ord-1", + "quota_name": "Object Storage Maximum Buckets", + "description": "Maximum number of buckets this customer is allowed to have on this endpoint.", + "endpoint_type": "E1", + "s3_endpoint": "us-iad-1.linodeobjects.com", + "quota_limit": 50, + "resource_metric": "bucket" + } + ], + "page": 1, + "pages": 1, + "results": 2 +} \ No newline at end of file diff --git a/test/fixtures/object-storage_quotas_obj-objects-us-ord-1.json b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1.json new file mode 100644 index 000000000..e01d743c3 --- /dev/null +++ b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1.json @@ -0,0 +1,9 @@ +{ + "quota_id": "obj-objects-us-ord-1", + "quota_name": "Object Storage Maximum Objects", + "description": "Maximum number of Objects this customer is allowed to have on this endpoint.", + "endpoint_type": "E1", + "s3_endpoint": "us-iad-1.linodeobjects.com", + "quota_limit": 50, + "resource_metric": "object" +} \ No newline at end of file diff --git a/test/fixtures/object-storage_quotas_obj-objects-us-ord-1_usage.json b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1_usage.json new file mode 100644 index 000000000..59b306044 --- /dev/null +++ b/test/fixtures/object-storage_quotas_obj-objects-us-ord-1_usage.json @@ -0,0 +1,4 @@ +{ + "quota_limit": 100, + 
"usage": 10 +} diff --git a/test/fixtures/object-storage_transfer.json b/test/fixtures/object-storage_transfer.json new file mode 100644 index 000000000..ae1968e66 --- /dev/null +++ b/test/fixtures/object-storage_transfer.json @@ -0,0 +1,3 @@ +{ + "used": 12956600198 +} \ No newline at end of file diff --git a/test/fixtures/object-storage_types.json b/test/fixtures/object-storage_types.json new file mode 100644 index 000000000..029823580 --- /dev/null +++ b/test/fixtures/object-storage_types.json @@ -0,0 +1,23 @@ +{ + "data": [ + { + "id": "objectstorage", + "label": "Object Storage", + "price": { + "hourly": 0.0015, + "monthly": 0.1 + }, + "region_prices": [ + { + "hourly": 0.00018, + "id": "us-east", + "monthly": 0.12 + } + ], + "transfer": 0 + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/placement_groups.json b/test/fixtures/placement_groups.json new file mode 100644 index 000000000..bf05f9936 --- /dev/null +++ b/test/fixtures/placement_groups.json @@ -0,0 +1,33 @@ +{ + "data": [ + { + "id": 123, + "label": "test", + "region": "eu-west", + "placement_group_type": "anti_affinity:local", + "placement_group_policy": "strict", + "is_compliant": true, + "members": [ + { + "linode_id": 123, + "is_compliant": true + } + ], + "migrations": { + "inbound": [ + { + "linode_id": 123 + } + ], + "outbound": [ + { + "linode_id": 456 + } + ] + } + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/placement_groups_123.json b/test/fixtures/placement_groups_123.json new file mode 100644 index 000000000..c7a9cab27 --- /dev/null +++ b/test/fixtures/placement_groups_123.json @@ -0,0 +1,26 @@ +{ + "id": 123, + "label": "test", + "region": "eu-west", + "placement_group_type": "anti_affinity:local", + "placement_group_policy": "strict", + "is_compliant": true, + "members": [ + { + "linode_id": 123, + "is_compliant": true + } + ], + "migrations": { + "inbound": [ + { + 
"linode_id": 123 + } + ], + "outbound": [ + { + "linode_id": 456 + } + ] + } +} \ No newline at end of file diff --git a/test/fixtures/profile.json b/test/fixtures/profile.json new file mode 100644 index 000000000..2c62a70a0 --- /dev/null +++ b/test/fixtures/profile.json @@ -0,0 +1,24 @@ +{ + "authentication_type": "password", + "authorized_keys": [ + null + ], + "email": "example-user@gmail.com", + "email_notifications": true, + "ip_whitelist_enabled": false, + "lish_auth_method": "keys_only", + "referrals": { + "code": "871be32f49c1411b14f29f618aaf0c14637fb8d3", + "completed": 0, + "credit": 0, + "pending": 0, + "total": 0, + "url": "https://www.linode.com/?r=871be32f49c1411b14f29f618aaf0c14637fb8d3" + }, + "restricted": false, + "timezone": "US/Eastern", + "two_factor_auth": true, + "uid": 1234, + "username": "exampleUser", + "verified_phone_number": "+5555555555" +} \ No newline at end of file diff --git a/test/fixtures/profile_device_123.json b/test/fixtures/profile_device_123.json new file mode 100644 index 000000000..7505373ef --- /dev/null +++ b/test/fixtures/profile_device_123.json @@ -0,0 +1,8 @@ +{ + "created": "2018-01-01T01:01:01", + "expiry": "2018-01-31T01:01:01", + "id": 123, + "last_authenticated": "2018-01-05T12:57:12", + "last_remote_addr": "203.0.113.1", + "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36 Vivaldi/2.1.1337.36\n" +} diff --git a/test/fixtures/profile_devices.json b/test/fixtures/profile_devices.json new file mode 100644 index 000000000..0c8ce9322 --- /dev/null +++ b/test/fixtures/profile_devices.json @@ -0,0 +1,15 @@ +{ + "data": [ + { + "created": "2018-01-01T01:01:01", + "expiry": "2018-01-31T01:01:01", + "id": 123, + "last_authenticated": "2018-01-05T12:57:12", + "last_remote_addr": "203.0.113.1", + "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36 
Vivaldi/2.1.1337.36\n" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/profile_logins.json b/test/fixtures/profile_logins.json new file mode 100644 index 000000000..53cc1d8d7 --- /dev/null +++ b/test/fixtures/profile_logins.json @@ -0,0 +1,15 @@ +{ + "data": [ + { + "datetime": "2018-01-01T00:01:01", + "id": 123, + "ip": "192.0.2.0", + "restricted": true, + "status": "successful", + "username": "example_user" + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/profile_logins_123.json b/test/fixtures/profile_logins_123.json new file mode 100644 index 000000000..0d700f79e --- /dev/null +++ b/test/fixtures/profile_logins_123.json @@ -0,0 +1,8 @@ +{ + "datetime": "2018-01-01T00:01:01", + "id": 123, + "ip": "192.0.2.0", + "restricted": true, + "status": "successful", + "username": "example_user" +} \ No newline at end of file diff --git a/test/fixtures/profile_preferences.json b/test/fixtures/profile_preferences.json new file mode 100644 index 000000000..9b24d09fa --- /dev/null +++ b/test/fixtures/profile_preferences.json @@ -0,0 +1,4 @@ +{ + "key1": "value1", + "key2": "value2" +} \ No newline at end of file diff --git a/test/fixtures/profile_security-questions.json b/test/fixtures/profile_security-questions.json new file mode 100644 index 000000000..7e7821853 --- /dev/null +++ b/test/fixtures/profile_security-questions.json @@ -0,0 +1,9 @@ +{ + "security_questions": [ + { + "id": 1, + "question": "In what city were you born?", + "response": "Gotham City" + } + ] +} \ No newline at end of file diff --git a/test/fixtures/profile_sshkeys.json b/test/fixtures/profile_sshkeys.json new file mode 100644 index 000000000..155a5a18d --- /dev/null +++ b/test/fixtures/profile_sshkeys.json @@ -0,0 +1,19 @@ +{ + "data": [ + { + "id": 22, + "label": "Home Ubuntu PC", + "ssh_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCDeWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWOTVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3imuc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRoF2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoCMGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qynZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfztUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36vTvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eHd1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23bcKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCICmhW7erNJNVxYjtzseGpBLmRRUTsT038w== dorthu@dorthu-command", + "created": "2018-09-14T13:00:00" + }, + { + "id": 72, + "label": "Work Laptop", + "ssh_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC29XlmxbDaEXd4O9eOfjVwHevLGnEocKwsgzQx62CD3FmZ6maZIKYy7J7Si3ct7l69Ic3NcWvl8enXLUUhmoho0whSKCizQFjotC8u+MJwJWFPd6ioYLRRIXH7l0ZQ/oYkxsK13nCx9CiirebM5OXRW4WJET+sYbnNa3cH/PkEDZHvr7vQ+kUxjlcQAMwQ/VF+VpeA7XRFVqvPJr73KEsdQGVc2ZGdaHp7xycgKOuFrTHJT5dVd4wk+3n3DVZgZgYQZim86MWB9TUDXgonDKG4VoQ2Pborxh5lLwElncHFn8digfdmVu5Pg7BtzsLLhfaQFl4EnuU072/WiNmS9I6bs1S4ExEIKOeUqfglT7ypDX2usulK69q4ZAfJHruqPG1+UobhbwwIS8zlFEhDmgWw2zYA9CYMn1WzcfPUKumG+qIOVzK1D+kV1V30WpnQi+BZtey8EKHU2pVdy3SrlZ6WPBnSRGJgNyP8Gq6L1vKFOXPg8RTrpnXMHJv7YwKy/boLwIPUJ/PBuIWo5oqWRYWsE1nAkAoVZE98gJFQGrvsUKsJkbJM4MP4KjFkFOMBkMPTyEubb96VypWzwGaEcpuXcULqvnzgA5Npg8ah/anrAHxUNk8Cc/QfnrKhjAqze4q1+mUR9OCursSs9rKfqd0g1Cbu7FNPw+OQevNI8zpmJw== wsmith@linode", + "created": "2018-09-14T13:30:00" + } + ], + "pages": 1, + "page": 1, + "results": 2 +} diff --git a/test/fixtures/regions.json b/test/fixtures/regions.json new file mode 100644 index 000000000..1482def37 --- /dev/null +++ b/test/fixtures/regions.json @@ -0,0 +1,246 @@ +{ + "data": [ + { + "id": "ap-west", + "country": "in", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" 
+ ], + "status": "ok", + "resolvers": { + "ipv4": "172.105.34.5,172.105.35.5,172.105.36.5,172.105.37.5,172.105.38.5,172.105.39.5,172.105.40.5,172.105.41.5,172.105.42.5,172.105.43.5", + "ipv6": "2400:8904::f03c:91ff:fea5:659,2400:8904::f03c:91ff:fea5:9282,2400:8904::f03c:91ff:fea5:b9b3,2400:8904::f03c:91ff:fea5:925a,2400:8904::f03c:91ff:fea5:22cb,2400:8904::f03c:91ff:fea5:227a,2400:8904::f03c:91ff:fea5:924c,2400:8904::f03c:91ff:fea5:f7e2,2400:8904::f03c:91ff:fea5:2205,2400:8904::f03c:91ff:fea5:9207" + }, + "label": "label1", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "ca-central", + "country": "ca", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "172.105.0.5,172.105.3.5,172.105.4.5,172.105.5.5,172.105.6.5,172.105.7.5,172.105.8.5,172.105.9.5,172.105.10.5,172.105.11.5", + "ipv6": "2600:3c04::f03c:91ff:fea9:f63,2600:3c04::f03c:91ff:fea9:f6d,2600:3c04::f03c:91ff:fea9:f80,2600:3c04::f03c:91ff:fea9:f0f,2600:3c04::f03c:91ff:fea9:f99,2600:3c04::f03c:91ff:fea9:fbd,2600:3c04::f03c:91ff:fea9:fdd,2600:3c04::f03c:91ff:fea9:fe2,2600:3c04::f03c:91ff:fea9:f68,2600:3c04::f03c:91ff:fea9:f4a" + }, + "label": "label2", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "ap-southeast", + "country": "au", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "172.105.166.5,172.105.169.5,172.105.168.5,172.105.172.5,172.105.162.5,172.105.170.5,172.105.167.5,172.105.171.5,172.105.181.5,172.105.161.5", + "ipv6": 
"2400:8907::f03c:92ff:fe6e:ec8,2400:8907::f03c:92ff:fe6e:98e4,2400:8907::f03c:92ff:fe6e:1c58,2400:8907::f03c:92ff:fe6e:c299,2400:8907::f03c:92ff:fe6e:c210,2400:8907::f03c:92ff:fe6e:c219,2400:8907::f03c:92ff:fe6e:1c5c,2400:8907::f03c:92ff:fe6e:c24e,2400:8907::f03c:92ff:fe6e:e6b,2400:8907::f03c:92ff:fe6e:e3d" + }, + "label": "label3", + "site_type": "core" + }, + { + "id": "us-central", + "country": "us", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "72.14.179.5,72.14.188.5,173.255.199.5,66.228.53.5,96.126.122.5,96.126.124.5,96.126.127.5,198.58.107.5,198.58.111.5,23.239.24.5", + "ipv6": "2600:3c00::2,2600:3c00::9,2600:3c00::7,2600:3c00::5,2600:3c00::3,2600:3c00::8,2600:3c00::6,2600:3c00::4,2600:3c00::c,2600:3c00::b" + }, + "label": "label4", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "us-west", + "country": "us", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "173.230.145.5,173.230.147.5,173.230.155.5,173.255.212.5,173.255.219.5,173.255.241.5,173.255.243.5,173.255.244.5,74.207.241.5,74.207.242.5", + "ipv6": "2600:3c01::2,2600:3c01::9,2600:3c01::5,2600:3c01::7,2600:3c01::3,2600:3c01::8,2600:3c01::4,2600:3c01::b,2600:3c01::c,2600:3c01::6" + }, + "label": "label5", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "us-southeast", + "country": "us", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "74.207.231.5,173.230.128.5,173.230.129.5,173.230.136.5,173.230.140.5,66.228.59.5,66.228.62.5,50.116.35.5,50.116.41.5,23.239.18.5", + "ipv6": 
"2600:3c02::3,2600:3c02::5,2600:3c02::4,2600:3c02::6,2600:3c02::c,2600:3c02::7,2600:3c02::2,2600:3c02::9,2600:3c02::8,2600:3c02::b" + }, + "label": "label6", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "us-east", + "country": "us", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage", + "Linode Interfaces" + ], + "monitors": { + "alerts": [ + "Managed Databases" + ], + "metrics": [ + "Managed Databases" + ] + }, + "status": "ok", + "resolvers": { + "ipv4": "66.228.42.5,96.126.106.5,50.116.53.5,50.116.58.5,50.116.61.5,50.116.62.5,66.175.211.5,97.107.133.4,207.192.69.4,207.192.69.5", + "ipv6": "2600:3c03::7,2600:3c03::4,2600:3c03::9,2600:3c03::6,2600:3c03::3,2600:3c03::c,2600:3c03::5,2600:3c03::b,2600:3c03::2,2600:3c03::8" + }, + "label": "label7", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "eu-west", + "country": "uk", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "178.79.182.5,176.58.107.5,176.58.116.5,176.58.121.5,151.236.220.5,212.71.252.5,212.71.253.5,109.74.192.20,109.74.193.20,109.74.194.20", + "ipv6": "2a01:7e00::9,2a01:7e00::3,2a01:7e00::c,2a01:7e00::5,2a01:7e00::6,2a01:7e00::8,2a01:7e00::b,2a01:7e00::4,2a01:7e00::7,2a01:7e00::2" + }, + "label": "label8", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "ap-south", + "country": "sg", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "139.162.11.5,139.162.13.5,139.162.14.5,139.162.15.5,139.162.16.5,139.162.21.5,139.162.27.5,103.3.60.18,103.3.60.19,103.3.60.20", + "ipv6": 
"2400:8901::5,2400:8901::4,2400:8901::b,2400:8901::3,2400:8901::9,2400:8901::2,2400:8901::8,2400:8901::7,2400:8901::c,2400:8901::6" + }, + "label": "label9", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "eu-central", + "country": "de", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "139.162.130.5,139.162.131.5,139.162.132.5,139.162.133.5,139.162.134.5,139.162.135.5,139.162.136.5,139.162.137.5,139.162.138.5,139.162.139.5", + "ipv6": "2a01:7e01::5,2a01:7e01::9,2a01:7e01::7,2a01:7e01::c,2a01:7e01::2,2a01:7e01::4,2a01:7e01::3,2a01:7e01::6,2a01:7e01::b,2a01:7e01::8" + }, + "label": "label10", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + }, + { + "id": "ap-northeast", + "country": "jp", + "capabilities": [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces" + ], + "status": "ok", + "resolvers": { + "ipv4": "139.162.66.5,139.162.67.5,139.162.68.5,139.162.69.5,139.162.70.5,139.162.71.5,139.162.72.5,139.162.73.5,139.162.74.5,139.162.75.5", + "ipv6": "2400:8902::3,2400:8902::6,2400:8902::c,2400:8902::4,2400:8902::2,2400:8902::8,2400:8902::7,2400:8902::5,2400:8902::b,2400:8902::9" + }, + "label": "label11", + "site_type": "core", + "placement_group_limits": { + "maximum_pgs_per_customer": 5, + "maximum_linodes_per_pg": 5 + } + } + ], + "page": 1, + "pages": 1, + "results": 11 +} + diff --git a/test/fixtures/regions_availability.json b/test/fixtures/regions_availability.json new file mode 100644 index 000000000..ff5122df8 --- /dev/null +++ b/test/fixtures/regions_availability.json @@ -0,0 +1,507 @@ +{ + "data": [ + { + "region": "us-central", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "us-central", + "plan": "gpu-rtx6000-2.1", + "available": false + 
}, + { + "region": "us-central", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "us-central", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "us-central", + "plan": "premium131072.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium16384.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium262144.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium32768.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium4096.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium524288.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium65536.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium8192.7", + "available": false + }, + { + "region": "us-central", + "plan": "premium98304.7", + "available": false + }, + { + "region": "us-west", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "us-west", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "us-west", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "us-west", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "us-west", + "plan": "premium131072.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium16384.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium262144.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium32768.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium4096.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium524288.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium65536.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium8192.7", + "available": false + }, + { + "region": "us-west", + "plan": "premium98304.7", + "available": false + }, + { + 
"region": "us-southeast", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "us-southeast", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "us-southeast", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "us-southeast", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium131072.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium16384.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium262144.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium32768.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium4096.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium524288.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium65536.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium8192.7", + "available": false + }, + { + "region": "us-southeast", + "plan": "premium98304.7", + "available": false + }, + { + "region": "us-east", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "us-east", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "us-east", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "us-east", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "us-east", + "plan": "premium131072.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium16384.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium262144.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium32768.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium4096.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium524288.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium65536.7", + 
"available": false + }, + { + "region": "us-east", + "plan": "premium8192.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium98304.7", + "available": false + }, + { + "region": "eu-west", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "eu-west", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "eu-west", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "eu-west", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "eu-west", + "plan": "premium131072.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium16384.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium262144.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium32768.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium4096.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium524288.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium65536.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium8192.7", + "available": false + }, + { + "region": "eu-west", + "plan": "premium98304.7", + "available": false + }, + { + "region": "ap-south", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "ap-south", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "ap-south", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "ap-south", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "ap-south", + "plan": "premium131072.7", + "available": false + }, + { + "region": "ap-south", + "plan": "premium16384.7", + "available": false + }, + { + "region": "ap-south", + "plan": "premium262144.7", + "available": false + }, + { + "region": "ap-south", + "plan": "premium32768.7", + "available": false + }, + { + "region": "ap-south", + "plan": "premium4096.7", + "available": false + }, + { + 
"region": "ap-south", + "plan": "premium524288.7", + "available": false + }, + { + "region": "ap-south", + "plan": "premium65536.7", + "available": false + }, + { + "region": "ap-south", + "plan": "premium8192.7", + "available": false + }, + { + "region": "ap-south", + "plan": "premium98304.7", + "available": false + }, + { + "region": "eu-central", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "eu-central", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "eu-central", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "eu-central", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "eu-central", + "plan": "premium131072.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium16384.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium262144.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium32768.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium4096.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium524288.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium65536.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium8192.7", + "available": false + }, + { + "region": "eu-central", + "plan": "premium98304.7", + "available": false + }, + { + "region": "ap-west", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "ap-west", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "ap-west", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "ap-west", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "ap-west", + "plan": "premium131072.7", + "available": false + }, + { + "region": "ap-west", + "plan": "premium16384.7", + "available": false + }, + { + "region": "ap-west", + "plan": "premium262144.7", + "available": false + }, + { + 
"region": "ap-west", + "plan": "premium32768.7", + "available": false + }, + { + "region": "ap-west", + "plan": "premium4096.7", + "available": false + } + ], + "page": 1, + "pages": 3, + "results": 299 +} \ No newline at end of file diff --git a/test/fixtures/regions_us-east_availability.json b/test/fixtures/regions_us-east_availability.json new file mode 100644 index 000000000..f7dc11ea2 --- /dev/null +++ b/test/fixtures/regions_us-east_availability.json @@ -0,0 +1,67 @@ +[ + { + "region": "us-east", + "plan": "gpu-rtx6000-1.1", + "available": false + }, + { + "region": "us-east", + "plan": "gpu-rtx6000-2.1", + "available": false + }, + { + "region": "us-east", + "plan": "gpu-rtx6000-3.1", + "available": false + }, + { + "region": "us-east", + "plan": "gpu-rtx6000-4.1", + "available": false + }, + { + "region": "us-east", + "plan": "premium131072.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium16384.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium262144.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium32768.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium4096.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium524288.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium65536.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium8192.7", + "available": false + }, + { + "region": "us-east", + "plan": "premium98304.7", + "available": false + } +] \ No newline at end of file diff --git a/test/fixtures/regions_us-east_vpc-availability.json b/test/fixtures/regions_us-east_vpc-availability.json new file mode 100644 index 000000000..209959e5d --- /dev/null +++ b/test/fixtures/regions_us-east_vpc-availability.json @@ -0,0 +1,5 @@ +{ + "region": "us-east", + "available": true, + "available_ipv6_prefix_lengths": [52, 48] +} diff --git a/test/fixtures/regions_vpc-availability.json 
b/test/fixtures/regions_vpc-availability.json new file mode 100644 index 000000000..5e4d386df --- /dev/null +++ b/test/fixtures/regions_vpc-availability.json @@ -0,0 +1,132 @@ +{ + "data": [ + { + "region": "us-east", + "available": true, + "available_ipv6_prefix_lengths": [52, 48] + }, + { + "region": "us-west", + "available": true, + "available_ipv6_prefix_lengths": [56, 52, 48] + }, + { + "region": "nl-ams", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "us-ord", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "us-iad", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "fr-par", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "us-sea", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "br-gru", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "se-sto", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "es-mad", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "in-maa", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "jp-osa", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "it-mil", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "us-mia", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "id-cgk", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "us-lax", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "gb-lon", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "au-mel", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "in-bom-2", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "de-fra-2", + "available": true, + 
"available_ipv6_prefix_lengths": [] + }, + { + "region": "sg-sin-2", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "jp-tyo-3", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "fr-par-2", + "available": true, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "ca-central", + "available": false, + "available_ipv6_prefix_lengths": [] + }, + { + "region": "ap-southeast", + "available": false, + "available_ipv6_prefix_lengths": [] + } + ], + "page": 1, + "pages": 2, + "results": 50 +} diff --git a/test/fixtures/support_tickets_123.json b/test/fixtures/support_tickets_123.json new file mode 100644 index 000000000..4a568f111 --- /dev/null +++ b/test/fixtures/support_tickets_123.json @@ -0,0 +1,22 @@ +{ + "attachments": [ + null + ], + "closable": false, + "closed": "2015-06-04T16:07:03", + "description": "I'm having trouble setting the root password on my Linode. I tried following the instructions but something is not working and I'm not sure what I'm doing wrong. 
Can you please help me figure out how I can reset it?\n", + "entity": { + "id": 10400, + "label": "linode123456", + "type": "linode", + "url": "/v4/linode/instances/123456" + }, + "gravatar_id": "474a1b7373ae0be4132649e69c36ce30", + "id": 123, + "opened": "2015-06-04T14:16:44", + "opened_by": "some_user", + "status": "open", + "summary": "Having trouble resetting root password on my Linode\n", + "updated": "2015-06-04T16:07:03", + "updated_by": "some_other_user" + } diff --git a/test/fixtures/tags.json b/test/fixtures/tags.json new file mode 100644 index 000000000..abe9e92e6 --- /dev/null +++ b/test/fixtures/tags.json @@ -0,0 +1,13 @@ +{ + "page": 1, + "pages": 1, + "results": 2, + "data": [ + { + "label": "nothing" + }, + { + "label": "something" + } + ] +} diff --git a/test/fixtures/tags_nothing.json b/test/fixtures/tags_nothing.json new file mode 100644 index 000000000..c3be36294 --- /dev/null +++ b/test/fixtures/tags_nothing.json @@ -0,0 +1,6 @@ +{ + "page": 1, + "pages": 1, + "results": 0, + "data": [] +} diff --git a/test/fixtures/tags_something.json b/test/fixtures/tags_something.json new file mode 100644 index 000000000..7cce51301 --- /dev/null +++ b/test/fixtures/tags_something.json @@ -0,0 +1,95 @@ +{ + "page": 1, + "pages": 1, + "results": 1, + "data": [ + { + "type": "linode", + "data": { + "group": "test", + "hypervisor": "kvm", + "id": 123, + "status": "running", + "type": "g6-standard-1", + "alerts": { + "network_in": 5, + "network_out": 5, + "cpu": 90, + "transfer_quota": 80, + "io": 5000 + }, + "label": "linode123", + "backups": { + "enabled": true, + "schedule": { + "window": "W02", + "day": "Scheduling" + } + }, + "specs": { + "memory": 2048, + "disk": 30720, + "vcpus": 1, + "transfer": 2000 + }, + "ipv6": "1234:abcd::1234:abcd:89ef:67cd/64", + "created": "2017-01-01T00:00:00", + "region": "us-east-1a", + "ipv4": [ + "123.45.67.89" + ], + "updated": "2017-01-01T00:00:00", + "image": "linode/ubuntu17.04", + "tags": ["something"] + } + }, + { + 
"type": "domain", + "data": { + "domain": "example.org", + "type": "master", + "id": 12345, + "axfr_ips": [], + "retry_sec": 0, + "ttl_sec": 300, + "status": "active", + "master_ips": [], + "description": "", + "group": "", + "expire_sec": 0, + "soa_email": "test@example.org", + "refresh_sec": 0, + "tags": ["something"] + } + }, + { + "type": "nodebalancer", + "data": { + "created": "2018-01-01T00:01:01", + "ipv6": "c001:d00d:b01::1:abcd:1234", + "region": "us-east-1a", + "ipv4": "12.34.56.789", + "hostname": "nb-12-34-56-789.newark.nodebalancer.linode.com", + "id": 123456, + "updated": "2018-01-01T00:01:01", + "label": "balancer123456", + "client_conn_throttle": 0, + "tags": ["something"] + } + }, + { + "type": "volume", + "data": { + "id": 1, + "label": "block1", + "created": "2017-08-04T03:00:00", + "region": "us-east-1a", + "linode_id": null, + "size": 40, + "updated": "2017-08-04T04:00:00", + "status": "active", + "tags": ["something"] + } + } + ] +} diff --git a/test/fixtures/testmappedobj1.json b/test/fixtures/testmappedobj1.json new file mode 100644 index 000000000..0914c1ded --- /dev/null +++ b/test/fixtures/testmappedobj1.json @@ -0,0 +1,3 @@ +{ + "bar": "bar" +} \ No newline at end of file diff --git a/test/fixtures/volumes.json b/test/fixtures/volumes.json new file mode 100644 index 000000000..2e8c86338 --- /dev/null +++ b/test/fixtures/volumes.json @@ -0,0 +1,64 @@ +{ + "data": [ + { + "id": 1, + "label": "block1", + "created": "2017-08-04T03:00:00", + "region": "us-east-1a", + "linode_id": null, + "size": 40, + "updated": "2017-08-04T04:00:00", + "status": "active", + "tags": ["something"], + "filesystem_path": "this/is/a/file/path", + "hardware_type": "hdd", + "linode_label": null + }, + { + "id": 2, + "label": "block2", + "created": "2017-08-06T17:00:00", + "region": "ap-south-1a", + "linode_id": null, + "size": 100, + "updated": "2017-08-07T04:00:00", + "status": "active", + "tags": [], + "filesystem_path": "this/is/a/file/path", + "hardware_type": 
"nvme", + "linode_label": null + }, + { + "id": 3, + "label": "block3", + "created": "2017-08-06T17:00:00", + "region": "ap-south-1a", + "linode_id": 1, + "size": 200, + "updated": "2017-08-07T04:00:00", + "status": "active", + "tags": ["attached"], + "filesystem_path": "this/is/a/file/path", + "hardware_type": "nvme", + "linode_label": "some_label" + }, + { + "id": 4, + "label": "block4", + "created": "2017-08-04T03:00:00", + "region": "ap-west-1a", + "linode_id": null, + "size": 40, + "updated": "2017-08-04T04:00:00", + "status": "active", + "tags": ["something"], + "filesystem_path": "this/is/a/file/path", + "hardware_type": "hdd", + "linode_label": null, + "encryption": "enabled" + } + ], + "results": 4, + "pages": 1, + "page": 1 +} diff --git a/test/fixtures/volumes_types.json b/test/fixtures/volumes_types.json new file mode 100644 index 000000000..9b975506e --- /dev/null +++ b/test/fixtures/volumes_types.json @@ -0,0 +1,28 @@ +{ + "data": [ + { + "id": "volume", + "label": "Storage Volume", + "price": { + "hourly": 0.00015, + "monthly": 0.1 + }, + "region_prices": [ + { + "id": "id-cgk", + "hourly": 0.00018, + "monthly": 0.12 + }, + { + "id": "br-gru", + "hourly": 0.00021, + "monthly": 0.14 + } + ], + "transfer": 0 + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/fixtures/vpcs.json b/test/fixtures/vpcs.json new file mode 100644 index 000000000..822f3bae1 --- /dev/null +++ b/test/fixtures/vpcs.json @@ -0,0 +1,20 @@ +{ + "data": [ + { + "label": "test-vpc", + "id": 123456, + "description": "A very real VPC.", + "region": "us-southeast", + "ipv6": [ + { + "range": "fd71:1140:a9d0::/52" + } + ], + "created": "2018-01-01T00:01:01", + "updated": "2018-01-01T00:01:01" + } + ], + "results": 1, + "page": 1, + "pages": 1 +} diff --git a/test/fixtures/vpcs_123456.json b/test/fixtures/vpcs_123456.json new file mode 100644 index 000000000..af6d2cff8 --- /dev/null +++ b/test/fixtures/vpcs_123456.json @@ -0,0 +1,13 @@ +{ + 
"label": "test-vpc", + "id": 123456, + "description": "A very real VPC.", + "region": "us-southeast", + "ipv6": [ + { + "range": "fd71:1140:a9d0::/52" + } + ], + "created": "2018-01-01T00:01:01", + "updated": "2018-01-01T00:01:01" +} \ No newline at end of file diff --git a/test/fixtures/vpcs_123456_ips.json b/test/fixtures/vpcs_123456_ips.json new file mode 100644 index 000000000..10cb94f3c --- /dev/null +++ b/test/fixtures/vpcs_123456_ips.json @@ -0,0 +1,44 @@ +{ + "data": [ + { + "address": "10.0.0.2", + "address_range": null, + "vpc_id": 123456, + "subnet_id": 654321, + "region": "us-ord", + "linode_id": 111, + "config_id": 222, + "interface_id": 333, + "active": true, + "nat_1_1": null, + "gateway": "10.0.0.1", + "prefix": 8, + "subnet_mask": "255.0.0.0" + }, + { + "address": "10.0.0.3", + "address_range": null, + "vpc_id": 41220, + "subnet_id": 41184, + "region": "us-ord", + "linode_id": 56323949, + "config_id": 59467106, + "interface_id": 1248358, + "active": true, + "nat_1_1": null, + "gateway": "10.0.0.1", + "prefix": 8, + "subnet_mask": "255.0.0.0" + }, + { + "ipv6_range": "fd71:1140:a9d0::/52", + "ipv6_is_public": true, + "ipv6_addresses": [ + { + "slaac_address": "fd71:1140:a9d0::/52" + } + ], + "vpc_id": 123456 + } + ] +} diff --git a/test/fixtures/vpcs_123456_subnets.json b/test/fixtures/vpcs_123456_subnets.json new file mode 100644 index 000000000..8239daec2 --- /dev/null +++ b/test/fixtures/vpcs_123456_subnets.json @@ -0,0 +1,45 @@ +{ + "data": [ + { + "label": "test-subnet", + "id": 789, + "ipv4": "10.0.0.0/24", + "ipv6": [ + { + "range": "fd71:1140:a9d0::/52" + } + ], + "linodes": [ + { + "id": 12345, + "interfaces": [ + { + "id": 678, + "active": true, + "config_id": null + }, + { + "id": 543, + "active": false, + "config_id": null + } + ] + } + ], + "databases": [ + { + "id": 12345, + "ipv4_range": "10.0.0.0/24", + "ipv6_ranges": [ + "2001:db8::/64" + ] + } + ], + "created": "2018-01-01T00:01:01", + "updated": "2018-01-01T00:01:01" + } + ], + 
"results": 1, + "page": 1, + "pages": 1 +} \ No newline at end of file diff --git a/test/fixtures/vpcs_123456_subnets_789.json b/test/fixtures/vpcs_123456_subnets_789.json new file mode 100644 index 000000000..199156130 --- /dev/null +++ b/test/fixtures/vpcs_123456_subnets_789.json @@ -0,0 +1,38 @@ +{ + "label": "test-subnet", + "id": 789, + "ipv4": "10.0.0.0/24", + "ipv6": [ + { + "range": "fd71:1140:a9d0::/52" + } + ], + "linodes": [ + { + "id": 12345, + "interfaces": [ + { + "id": 678, + "active": true, + "config_id": null + }, + { + "id": 543, + "active": false, + "config_id": null + } + ] + } + ], + "databases": [ + { + "id": 12345, + "ipv4_range": "10.0.0.0/24", + "ipv6_ranges": [ + "2001:db8::/64" + ] + } + ], + "created": "2018-01-01T00:01:01", + "updated": "2018-01-01T00:01:01" +} \ No newline at end of file diff --git a/test/fixtures/vpcs_ips.json b/test/fixtures/vpcs_ips.json new file mode 100644 index 000000000..7849f5d76 --- /dev/null +++ b/test/fixtures/vpcs_ips.json @@ -0,0 +1,32 @@ +{ + "data": [ + { + "address": "10.0.0.2", + "address_range": null, + "vpc_id": 123, + "subnet_id": 456, + "region": "us-mia", + "linode_id": 123, + "config_id": 456, + "interface_id": 789, + "active": true, + "nat_1_1": "172.233.179.133", + "gateway": "10.0.0.1", + "prefix": 24, + "subnet_mask": "255.255.255.0" + }, + { + "ipv6_range": "fd71:1140:a9d0::/52", + "ipv6_is_public": true, + "ipv6_addresses": [ + { + "slaac_address": "fd71:1140:a9d0::/52" + } + ], + "vpc_id": 123456 + } + ], + "page": 1, + "pages": 1, + "results": 1 +} \ No newline at end of file diff --git a/test/integration/__init__.py b/test/integration/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/integration/conftest.py b/test/integration/conftest.py new file mode 100644 index 000000000..a5c832f4f --- /dev/null +++ b/test/integration/conftest.py @@ -0,0 +1,730 @@ +import ipaddress +import logging +import os +import random +import time +from test.integration.helpers import 
( + get_test_label, + send_request_when_resource_available, + wait_for_condition, +) +from test.integration.models.database.helpers import get_db_engine_id +from typing import Optional, Set + +import pytest +import requests +from requests.exceptions import ConnectionError, RequestException + +from linode_api4 import ( + ExplicitNullValue, + InterfaceGeneration, + LinodeInterfaceDefaultRouteOptions, + LinodeInterfaceOptions, + LinodeInterfacePublicOptions, + LinodeInterfaceVLANOptions, + LinodeInterfaceVPCOptions, + PlacementGroupPolicy, + PlacementGroupType, + PostgreSQLDatabase, +) +from linode_api4.errors import ApiError +from linode_api4.linode_client import LinodeClient, MonitorClient +from linode_api4.objects import Region + +ENV_TOKEN_NAME = "LINODE_TOKEN" +ENV_API_URL_NAME = "LINODE_API_URL" +ENV_REGION_OVERRIDE = "LINODE_TEST_REGION_OVERRIDE" +ENV_API_CA_NAME = "LINODE_API_CA" +RUN_LONG_TESTS = "RUN_LONG_TESTS" +SKIP_E2E_FIREWALL = "SKIP_E2E_FIREWALL" + +ALL_ACCOUNT_AVAILABILITIES = { + "Linodes", + "NodeBalancers", + "Block Storage", + "Kubernetes", +} + +logger = logging.getLogger(__name__) + + +def get_token(): + return os.environ.get(ENV_TOKEN_NAME, None) + + +def get_api_url(): + return os.environ.get(ENV_API_URL_NAME, "https://api.linode.com/v4beta") + + +def get_regions( + client: LinodeClient, + capabilities: Optional[Set[str]] = None, + site_type: Optional[str] = None, +): + region_override = os.environ.get(ENV_REGION_OVERRIDE) + + # Allow overriding the target test region + if region_override is not None: + return Region(client, region_override) + + regions = client.regions() + + account_regional_availabilities = {} + try: + account_availabilities = client.account.availabilities() + for availability in account_availabilities: + account_regional_availabilities[availability.region] = ( + availability.available + ) + except ApiError: + logger.warning( + "Failed to retrieve account availabilities for regions. 
" + "Assuming required capabilities are available in all regions for this account. " + "Tests may fail if the account lacks access to necessary capabilities in the selected region." + ) + + if capabilities is not None: + required_capabilities = set(capabilities) + required_account_capabilities = required_capabilities.intersection( + ALL_ACCOUNT_AVAILABILITIES + ) + + regions = [ + v + for v in regions + if required_capabilities.issubset(v.capabilities) + and required_account_capabilities.issubset( + account_regional_availabilities.get( + v.id, + ( + [] + if account_regional_availabilities + else ALL_ACCOUNT_AVAILABILITIES + ), + ) + ) + ] + + if site_type is not None: + regions = [v for v in regions if v.site_type == site_type] + + return regions + + +def get_region( + client: LinodeClient, capabilities: Set[str] = None, site_type: str = "core" +): + return random.choice(get_regions(client, capabilities, site_type)) + + +def get_api_ca_file(): + result = os.environ.get(ENV_API_CA_NAME, None) + return result if result != "" else None + + +def run_long_tests(): + return os.environ.get(RUN_LONG_TESTS, None) + + +@pytest.fixture(autouse=True, scope="session") +def e2e_test_firewall(test_linode_client): + # Allow skipping firewall creation for local runs: set SKIP_E2E_FIREWALL=1 + if os.environ.get(SKIP_E2E_FIREWALL): + # Yield None so fixtures depending on this receive a falsy value but the session continues. 
+ yield None + return + + def is_valid_ipv4(address): + try: + ipaddress.IPv4Address(address) + return True + except ipaddress.AddressValueError: + return False + + def is_valid_ipv6(address): + try: + ipaddress.IPv6Address(address) + return True + except ipaddress.AddressValueError: + return False + + def get_public_ip(ip_version: str = "ipv4", retries: int = 3): + url = ( + f"https://api64.ipify.org?format=json" + if ip_version == "ipv6" + else f"https://api.ipify.org?format=json" + ) + for attempt in range(retries): + try: + response = requests.get(url) + response.raise_for_status() + return str(response.json()["ip"]) + except (RequestException, ConnectionError) as e: + if attempt < retries - 1: + time.sleep(2) # Wait before retrying + else: + raise e + + def create_inbound_rule(ipv4_address, ipv6_address): + rule = [ + { + "protocol": "TCP", + "ports": "22", + "addresses": {}, + "action": "ACCEPT", + } + ] + if is_valid_ipv4(ipv4_address): + rule[0]["addresses"]["ipv4"] = [f"{ipv4_address}/32"] + + if is_valid_ipv6(ipv6_address): + rule[0]["addresses"]["ipv6"] = [f"{ipv6_address}/128"] + + return rule + + try: + ipv4_address = get_public_ip("ipv4") + except (RequestException, ConnectionError, ValueError, KeyError): + ipv4_address = None + + try: + ipv6_address = get_public_ip("ipv6") + except (RequestException, ConnectionError, ValueError, KeyError): + ipv6_address = None + + inbound_rule = [] + if ipv4_address or ipv6_address: + inbound_rule = create_inbound_rule(ipv4_address, ipv6_address) + + client = test_linode_client + + rules = { + "outbound": [], + "outbound_policy": "ACCEPT", + "inbound": inbound_rule, + "inbound_policy": "DROP", + } + + label = "cloud_firewall_" + str(int(time.time())) + + firewall = client.networking.firewall_create( + label=label, rules=rules, status="enabled" + ) + + yield firewall + + firewall.delete() + + +@pytest.fixture(scope="session") +def create_linode(test_linode_client, e2e_test_firewall): + client = test_linode_client + + 
region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label(length=8) + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture +def create_linode_for_pass_reset(test_linode_client, e2e_test_firewall): + client = test_linode_client + + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label(length=8) + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + yield linode_instance, password + + linode_instance.delete() + + +@pytest.fixture(scope="session") +def ssh_key_gen(): + output = os.popen("ssh-keygen -q -t rsa -f ./sdk-sshkey -q -N ''") + + time.sleep(1) + + pub_file = open("./sdk-sshkey.pub", "r") + pub_key = pub_file.read().rstrip() + + priv_file = open("./sdk-sshkey", "r") + priv_key = priv_file.read().rstrip() + + yield pub_key, priv_key + + os.popen("rm ./sdk-sshkey*") + + +@pytest.fixture(scope="session") +def test_linode_client(): + token = get_token() + api_url = get_api_url() + api_ca_file = get_api_ca_file() + client = LinodeClient( + token, + base_url=api_url, + ca_path=api_ca_file, + ) + return client + + +@pytest.fixture +def test_account_settings(test_linode_client): + client = test_linode_client + account_settings = client.account.settings() + account_settings._populated = True + account_settings.network_helper = True + + account_settings.save() + + +@pytest.fixture(scope="session") +def test_domain(test_linode_client): + client = test_linode_client + + timestamp = str(time.time_ns()) + domain_addr = timestamp + "-example.com" + soa_email = "pathiel-test123@linode.com" + + domain = client.domain_create( + domain=domain_addr, soa_email=soa_email, tags=["test-tag"] + ) + 
+ # Create a SRV record + domain.record_create( + "SRV", + target="rc_test", + priority=10, + weight=5, + port=80, + service="service_test", + ) + + yield domain + + domain.delete() + + +@pytest.fixture(scope="session") +def test_volume(test_linode_client): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label(length=8) + + volume = client.volume_create(label=label, region=region) + + yield volume + + send_request_when_resource_available(timeout=100, func=volume.delete) + + +@pytest.fixture(scope="session") +def test_volume_with_encryption(test_linode_client): + client = test_linode_client + region = get_region(client, {"Block Storage Encryption"}) + label = get_test_label(length=8) + + volume = client.volume_create( + label=label, region=region, encryption="enabled" + ) + + yield volume + + send_request_when_resource_available(timeout=100, func=volume.delete) + + +@pytest.fixture +def test_tag(test_linode_client): + client = test_linode_client + + label = get_test_label(length=8) + + tag = client.tag_create(label=label) + + yield tag + + tag.delete() + + +@pytest.fixture +def test_nodebalancer(test_linode_client): + client = test_linode_client + + label = get_test_label(length=8) + + nodebalancer = client.nodebalancer_create( + region=get_region(client, capabilities={"NodeBalancers"}), label=label + ) + + yield nodebalancer + + nodebalancer.delete() + + +@pytest.fixture +def test_longview_client(test_linode_client): + client = test_linode_client + label = get_test_label(length=8) + longview_client = client.longview.client_create(label=label) + + yield longview_client + + longview_client.delete() + + +@pytest.fixture +def test_sshkey(test_linode_client, ssh_key_gen): + pub_key = ssh_key_gen[0] + client = test_linode_client + key_label = get_test_label(8) + "_key" + key = client.profile.ssh_key_upload(pub_key, key_label) + + yield key + + key.delete() + + +@pytest.fixture +def 
access_keys_object_storage(test_linode_client): + client = test_linode_client + label = get_test_label(length=8) + key = client.object_storage.keys_create(label) + + yield key + + key.delete() + + +@pytest.fixture(scope="session") +def test_firewall(test_linode_client): + client = test_linode_client + rules = { + "outbound": [], + "outbound_policy": "DROP", + "inbound": [], + "inbound_policy": "ACCEPT", + } + + label = get_test_label(8) + "_firewall" + + firewall = client.networking.firewall_create( + label=label, rules=rules, status="enabled" + ) + + yield firewall + + firewall.delete() + + +@pytest.fixture +def test_oauth_client(test_linode_client): + client = test_linode_client + label = get_test_label(length=8) + "_oauth" + + oauth_client = client.account.oauth_client_create( + label, "https://localhost/oauth/callback" + ) + + yield oauth_client + + oauth_client.delete() + + +@pytest.fixture(scope="session") +def create_vpc(test_linode_client): + client = test_linode_client + + label = get_test_label(length=10) + + vpc = client.vpcs.create( + label=label, + region=get_region( + test_linode_client, {"VPCs", "VPC IPv6 Stack", "Linode Interfaces"} + ), + description="test description", + ipv6=[{"range": "auto"}], + ) + yield vpc + + vpc.delete() + + +@pytest.fixture(scope="session") +def create_vpc_with_subnet(test_linode_client, create_vpc): + subnet = create_vpc.subnet_create( + label="test-subnet", + ipv4="10.0.0.0/24", + ipv6=[{"range": "auto"}], + ) + + yield create_vpc, subnet + + subnet.delete() + + +@pytest.fixture(scope="session") +def create_vpc_with_subnet_and_linode( + test_linode_client, create_vpc_with_subnet, e2e_test_firewall +): + vpc, subnet = create_vpc_with_subnet + + label = get_test_label(length=8) + + instance, password = test_linode_client.linode.instance_create( + "g6-standard-1", + vpc.region, + image="linode/debian11", + label=label, + firewall=e2e_test_firewall, + ) + + yield vpc, subnet, instance, password + + instance.delete() + + 
+@pytest.fixture(scope="session") +def create_multiple_vpcs(test_linode_client): + client = test_linode_client + + label = get_test_label(length=10) + + label_2 = get_test_label(length=10) + + vpc_1 = client.vpcs.create( + label, + get_region(test_linode_client, {"VPCs"}), + description="test description", + ) + + vpc_2 = client.vpcs.create( + label_2, + get_region(test_linode_client, {"VPCs"}), + description="test description", + ) + + yield vpc_1, vpc_2 + + vpc_1.delete() + + vpc_2.delete() + + +@pytest.fixture(scope="session") +def create_placement_group(test_linode_client): + client = test_linode_client + + label = get_test_label(10) + + pg = client.placement.group_create( + label, + get_region(test_linode_client, {"Placement Group"}), + PlacementGroupType.anti_affinity_local, + PlacementGroupPolicy.flexible, + ) + yield pg + + pg.delete() + + +@pytest.fixture(scope="session") +def create_placement_group_with_linode( + test_linode_client, create_placement_group +): + client = test_linode_client + + inst = client.linode.instance_create( + "g6-nanode-1", + create_placement_group.region, + label=create_placement_group.label, + placement_group=create_placement_group, + ) + + create_placement_group.invalidate() + + yield create_placement_group, inst + + inst.delete() + + +@pytest.mark.smoke +def pytest_configure(config): + config.addinivalue_line( + "markers", + "smoke: mark test as part of smoke test suite", + ) + + +@pytest.fixture(scope="session") +def linode_for_vlan_tests(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Linodes", "Vlans"}, site_type="core") + label = get_test_label(length=8) + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture(scope="function") +def linode_with_interface_generation_linode( + 
test_linode_client, + e2e_test_firewall, + # We won't be using this all the time, but it's + # necessary for certain consumers of this fixture + create_vpc_with_subnet, +): + client = test_linode_client + + label = get_test_label() + + instance = client.linode.instance_create( + "g6-nanode-1", + create_vpc_with_subnet[0].region, + label=label, + interface_generation=InterfaceGeneration.LINODE, + booted=False, + ) + + yield instance + + instance.delete() + + +@pytest.fixture(scope="function") +def linode_with_linode_interfaces( + test_linode_client, e2e_test_firewall, create_vpc_with_subnet +): + client = test_linode_client + vpc, subnet = create_vpc_with_subnet + + # Are there regions where VPCs are supported but Linode Interfaces aren't? + region = vpc.region + label = get_test_label() + + instance, _ = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + booted=False, + interface_generation=InterfaceGeneration.LINODE, + interfaces=[ + LinodeInterfaceOptions( + firewall_id=e2e_test_firewall.id, + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True, + ipv6=True, + ), + public=LinodeInterfacePublicOptions(), + ), + LinodeInterfaceOptions( + firewall_id=ExplicitNullValue, + vpc=LinodeInterfaceVPCOptions( + subnet_id=subnet.id, + ), + ), + LinodeInterfaceOptions( + vlan=LinodeInterfaceVLANOptions( + vlan_label="test-vlan", ipam_address="10.0.0.5/32" + ), + ), + ], + ) + + yield instance + + instance.delete() + + +@pytest.fixture(scope="session") +def test_create_postgres_db(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "postgresql") + dbtype = "g6-standard-1" + + db = client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + ) + + def get_db_status(): + return db.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + 
wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +@pytest.fixture(scope="session") +def get_monitor_token_for_db_entities( + test_linode_client, test_create_postgres_db +): + client = test_linode_client + + dbs = client.database.postgresql_instances() + + if len(dbs) < 1: + db_id = test_create_postgres_db.id + else: + db_id = dbs[0].id + + region = client.load(PostgreSQLDatabase, db_id).region + dbs = client.database.instances() + + # only collect entity_ids in the same region + entity_ids = [db.id for db in dbs if db.region == region] + + # create token for the particular service + token = client.monitor.create_token( + service_type="dbaas", entity_ids=entity_ids + ) + + yield token, entity_ids + + +@pytest.fixture(scope="session") +def test_monitor_client(get_monitor_token_for_db_entities): + api_ca_file = get_api_ca_file() + token, entity_ids = get_monitor_token_for_db_entities + + client = MonitorClient( + token.token, + ca_path=api_ca_file, + ) + + return client, entity_ids diff --git a/test/integration/filters/fixtures.py b/test/integration/filters/fixtures.py new file mode 100644 index 000000000..e753236dd --- /dev/null +++ b/test/integration/filters/fixtures.py @@ -0,0 +1,37 @@ +from test.integration.conftest import get_region +from test.integration.helpers import get_test_label + +import pytest + + +@pytest.fixture(scope="package") +def domain_instance(test_linode_client): + client = test_linode_client + + domain_addr = get_test_label(5) + "-example.com" + soa_email = "dx-test-email@linode.com" + + domain = client.domain_create(domain=domain_addr, soa_email=soa_email) + + yield domain + + domain.delete() + + +@pytest.fixture(scope="package") +def lke_cluster_instance(test_linode_client): + node_type = test_linode_client.linode.types()[1] # g6-standard-1 + version = test_linode_client.lke.versions()[0] + + region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) + + 
node_pool = test_linode_client.lke.node_pool(node_type, 3) + label = get_test_label() + "_cluster" + + cluster = test_linode_client.lke.cluster_create( + region, label, version, [node_pool] + ) + + yield cluster + + cluster.delete() diff --git a/test/integration/filters/model_filters_test.py b/test/integration/filters/model_filters_test.py new file mode 100644 index 000000000..55bed6ac3 --- /dev/null +++ b/test/integration/filters/model_filters_test.py @@ -0,0 +1,85 @@ +from test.integration.filters.fixtures import ( # noqa: F401 + domain_instance, + lke_cluster_instance, +) + +from linode_api4.objects import ( + DatabaseEngine, + DatabaseType, + Domain, + Firewall, + Image, + LKECluster, + Type, +) + + +def test_database_type_model_filter(test_linode_client): + client = test_linode_client + + db_disk = client.database.types()[0].disk + + filtered_db_type = client.database.types(DatabaseType.disk == db_disk) + + assert db_disk == filtered_db_type[0].disk + + +def test_database_engine_model_filter(test_linode_client): + client = test_linode_client + + engine = "mysql" + + filtered_db_engine = client.database.engines( + DatabaseEngine.engine == engine + ) + + assert len(client.database.engines()) > len(filtered_db_engine) + + +def test_domain_model_filter(test_linode_client, domain_instance): + client = test_linode_client + + filtered_domain = client.domains(Domain.domain == domain_instance.domain) + + assert domain_instance.id == filtered_domain[0].id + + +def test_image_model_filter(test_linode_client): + client = test_linode_client + + filtered_images = client.images(Image.label.contains("Debian")) + + assert len(client.images()) > len(filtered_images) + + +def test_linode_type_model_filter(test_linode_client): + client = test_linode_client + + filtered_types = client.linode.types(Type.label.contains("Linode")) + + assert len(filtered_types) > 0 + assert "Linode" in filtered_types[0].label + + +def test_lke_cluster_model_filter(test_linode_client, 
lke_cluster_instance): + client = test_linode_client + lke_cluster = lke_cluster_instance + + filtered_cluster = client.lke.clusters( + LKECluster.label.contains(lke_cluster.label) + ) + + assert filtered_cluster[0].id == lke_cluster.id + + +def test_networking_firewall_model_filter( + test_linode_client, e2e_test_firewall +): + client = test_linode_client + + filtered_firewall = client.networking.firewalls( + Firewall.label.contains(e2e_test_firewall.label) + ) + + assert len(filtered_firewall) > 0 + assert e2e_test_firewall.label in filtered_firewall[0].label diff --git a/test/integration/helpers.py b/test/integration/helpers.py new file mode 100644 index 000000000..969ca70a9 --- /dev/null +++ b/test/integration/helpers.py @@ -0,0 +1,59 @@ +import random +import time +from string import ascii_lowercase +from typing import Callable + +from linode_api4.errors import ApiError + + +def get_test_label(length: int = 8): + return "".join(random.choice(ascii_lowercase) for i in range(length)) + + +def wait_for_condition( + interval: int, timeout: int, condition: Callable, *args +) -> object: + end_time = time.time() + timeout + while time.time() < end_time: + result = condition(*args) + if result: + return result + time.sleep(interval) + raise TimeoutError( + f"Timeout Error: resource not available in {timeout} seconds" + ) + + +# Retry function to help in case of requests sending too quickly before instance is ready +def retry_sending_request( + retries: int, condition: Callable, *args, backoff: int = 5, **kwargs +) -> object: + for attempt in range(1, retries + 1): + try: + return condition(*args, **kwargs) + except ApiError as e: + if attempt == retries: + raise Exception( + "Api Error: Failed after all retry attempts" + ) from e + time.sleep(backoff) + + +def send_request_when_resource_available( + timeout: int, func: Callable, *args, **kwargs +) -> object: + start_time = time.time() + retry_statuses = {400, 500, 503} + + while True: + try: + return func(*args, 
**kwargs) + except ApiError as e: + if e.status in retry_statuses or "Please try again later" in str(e): + if time.time() - start_time > timeout: + raise TimeoutError( + f"Timeout Error: resource not available in {timeout} seconds" + ) + time.sleep(10) + else: + raise e diff --git a/test/integration/linode_client/__init__.py b/test/integration/linode_client/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/integration/linode_client/test_errors.py b/test/integration/linode_client/test_errors.py new file mode 100644 index 000000000..2c3ab57b5 --- /dev/null +++ b/test/integration/linode_client/test_errors.py @@ -0,0 +1,28 @@ +from linode_api4.errors import ApiError + + +def test_error_404(test_linode_client): + api_exc = None + + try: + test_linode_client.get("/invalid/endpoint") + except ApiError as exc: + api_exc = exc + + assert str(api_exc) == "GET /v4beta/invalid/endpoint: [404] Not found" + + +def test_error_400(test_linode_client): + api_exc = None + + try: + test_linode_client.linode.instance_create( + "g6-fake-plan", "us-fakeregion" + ) + except ApiError as exc: + api_exc = exc + + assert str(api_exc) == ( + "POST /v4beta/linode/instances: [400] type: A valid plan type by that ID was not found; " + "region: region is not valid" + ) diff --git a/test/integration/linode_client/test_linode_client.py b/test/integration/linode_client/test_linode_client.py new file mode 100644 index 000000000..4060064d3 --- /dev/null +++ b/test/integration/linode_client/test_linode_client.py @@ -0,0 +1,441 @@ +import re +import time +from test.integration.conftest import get_region +from test.integration.helpers import get_test_label + +import pytest + +from linode_api4 import ApiError +from linode_api4.objects import ConfigInterface, ObjectStorageKeys, Region + + +@pytest.fixture(scope="session") +def setup_client_and_linode(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Kubernetes", 
"NodeBalancers"}, "core").id + + label = get_test_label() + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + yield client, linode_instance + + linode_instance.delete() + + +def test_get_account(setup_client_and_linode): + client = setup_client_and_linode[0] + account = client.account() + + assert re.search("^$|[a-zA-Z]+", account.first_name) + assert re.search("^$|[a-zA-Z]+", account.last_name) + assert re.search( + "^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+.[a-zA-Z0-9-.]+$", account.email + ) + assert re.search("^$|[a-zA-Z0-9]+", account.address_1) + assert re.search("^$|[a-zA-Z0-9]+", account.address_2) + assert re.search("^$|[a-zA-Z]+", account.city) + assert re.search("^$|[a-zA-Z]+", account.state) + assert re.search("^$|[a-zA-Z]+", account.country) + assert re.search("^$|[a-zA-Z0-9]+", account.zip) + if account.tax_id: + assert re.search("^$|[0-9]+", account.tax_id) + + +@pytest.mark.smoke +def test_fails_to_create_domain_without_soa_email(setup_client_and_linode): + client = setup_client_and_linode[0] + + timestamp = str(time.time_ns()) + domain_addr = timestamp + "example.com" + try: + domain = client.domain_create(domain=domain_addr) + except ApiError as e: + assert e.status == 400 + + +@pytest.mark.smoke +def test_get_domains(test_linode_client, test_domain): + client = test_linode_client + domain = test_domain + domain_dict = client.domains() + + dom_list = [i.domain for i in domain_dict] + + assert domain.domain in dom_list + + +@pytest.mark.smoke +def test_get_regions(test_linode_client): + client = test_linode_client + regions = client.regions() + + region_list = [r.id for r in regions] + + test_region = Region(client, "us-east") + + assert test_region.id in region_list + assert test_region.site_type in ["core", "edge"] + + +@pytest.mark.smoke +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_image_create(setup_client_and_linode): + client 
= setup_client_and_linode[0] + linode = setup_client_and_linode[1] + + label = get_test_label() + description = "Test description" + tags = ["test"] + usable_disk = [v for v in linode.disks if v.filesystem != "swap"] + + image = client.image_create( + disk=usable_disk[0].id, label=label, description=description, tags=tags + ) + + assert image.label == label + assert image.description == description + assert image.tags == tags + # size and total_size are the same because this image is not replicated + assert image.size == image.total_size + + +def test_fails_to_create_image_with_non_existing_disk_id( + setup_client_and_linode, +): + client = setup_client_and_linode[0] + + label = get_test_label() + description = "Test description" + disk_id = 111111 + + try: + image_page = client.image_create( + disk=disk_id, label=label, description=description + ) + except ApiError as e: + assert "Not found" in str(e.json) + assert e.status == 404 + + +def test_fails_to_delete_predefined_images(setup_client_and_linode): + client = setup_client_and_linode[0] + + images = client.images() + + try: + # new images go on top of the list thus choose last image + images.last().delete() + except ApiError as e: + assert "Unauthorized" in str(e.json) + assert e.status == 403 + + +def test_get_volume(test_linode_client, test_volume): + client = test_linode_client + label = test_volume.label + + volume_dict = client.volumes() + + volume_label_list = [i.label for i in volume_dict] + + assert label in volume_label_list + + +def test_get_tag(test_linode_client, test_tag): + client = test_linode_client + label = test_tag.label + + tags = client.tags() + + tag_label_list = [i.label for i in tags] + + assert label in tag_label_list + + +def test_create_tag_with_id( + setup_client_and_linode, test_nodebalancer, test_domain, test_volume +): + client = setup_client_and_linode[0] + linode = setup_client_and_linode[1] + nodebalancer = test_nodebalancer + domain = test_domain + volume = test_volume + + 
label = get_test_label() + + tag = client.tag_create( + label=label, + instances=[linode.id, linode], + nodebalancers=[nodebalancer.id, nodebalancer], + domains=[domain.id, domain], + volumes=[volume.id, volume], + ) + + # Get tags after creation + tags = client.tags() + + tag_label_list = [i.label for i in tags] + + tag.delete() + + assert label in tag_label_list + + +@pytest.mark.smoke +def test_create_tag_with_entities( + setup_client_and_linode, test_nodebalancer, test_domain, test_volume +): + client = setup_client_and_linode[0] + linode = setup_client_and_linode[1] + nodebalancer = test_nodebalancer + domain = test_domain + volume = test_volume + + label = get_test_label() + + tag = client.tag_create( + label, entities=[linode, domain, nodebalancer, volume] + ) + + # Get tags after creation + tags = client.tags() + + tag_label_list = [i.label for i in tags] + + tag.delete() + + assert label in tag_label_list + + +# AccountGroupTests +def test_get_account_settings(test_linode_client): + client = test_linode_client + account_settings = client.account.settings() + + assert account_settings._populated == True + assert re.search( + r"'network_helper':\s*(True|False)", str(account_settings._raw_json) + ) + + +# TODO: Account invoice and payment test cases need to be added + + +# LinodeGroupTests +def test_create_linode_instance_without_image(test_linode_client): + client = test_linode_client + region = get_region(client, {"Linodes"}, "core").id + label = get_test_label() + + linode_instance = client.linode.instance_create( + "g6-nanode-1", region, label=label + ) + + assert linode_instance.label == label + assert linode_instance.image is None + + res = linode_instance.delete() + + assert res + + +@pytest.mark.smoke +def test_create_linode_instance_with_image(setup_client_and_linode): + linode = setup_client_and_linode[1] + + assert re.search("linode/debian12", str(linode.image)) + + +def test_create_linode_with_interfaces(test_linode_client): + client = 
test_linode_client + region = get_region(client, {"Vlans", "Linodes"}, site_type="core").id + label = get_test_label() + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + label=label, + image="linode/debian12", + interfaces=[ + {"purpose": "public"}, + ConfigInterface( + purpose="vlan", label="cool-vlan", ipam_address="10.0.0.4/32" + ), + ], + ) + + assert len(linode_instance.configs[0].interfaces) == 2 + assert linode_instance.configs[0].interfaces[0].purpose == "public" + assert linode_instance.configs[0].interfaces[1].purpose == "vlan" + assert linode_instance.configs[0].interfaces[1].label == "cool-vlan" + assert ( + linode_instance.configs[0].interfaces[1].ipam_address == "10.0.0.4/32" + ) + + res = linode_instance.delete() + + assert res + + +# LongviewGroupTests +def test_get_longview_clients(test_linode_client, test_longview_client): + client = test_linode_client + + longview_client = client.longview.clients() + + client_labels = [i.label for i in longview_client] + + assert test_longview_client.label in client_labels + + +def test_client_create_with_label(test_linode_client): + client = test_linode_client + label = get_test_label() + longview_client = client.longview.client_create(label=label) + + assert label == longview_client.label + + time.sleep(5) + + res = longview_client.delete() + + assert res + + +# TODO: Subscription related test cases need to be added, currently returns a 404 +# def test_get_subscriptions(): + + +# LKEGroupTest + + +def test_kube_version(test_linode_client): + client = test_linode_client + lke_version = client.lke.versions() + + assert re.search("[0-9].[0-9]+", lke_version.first().id) + + +def test_cluster_create_with_api_objects(test_linode_client): + client = test_linode_client + node_type = client.linode.types()[1] # g6-standard-1 + version = client.lke.versions()[0] + region = get_region(client, {"Kubernetes"}) + node_pool = client.lke.node_pool(node_type, 3) + label = get_test_label() 
+ + cluster = client.lke.cluster_create(region, label, version, [node_pool]) + + assert cluster.region.id == region.id + assert cluster.k8s_version.id == version.id + + res = cluster.delete() + + assert res + + +def test_fails_to_create_cluster_with_invalid_version(test_linode_client): + invalid_version = "a.12" + client = test_linode_client + region = get_region(client, {"Kubernetes"}).id + + try: + cluster = client.lke.cluster_create( + region, + "example-cluster", + invalid_version, + {"type": "g6-standard-1", "count": 3}, + ) + except ApiError as e: + assert "not valid" in str(e.json) + assert e.status == 400 + + +# ObjectStorageGroupTests + + +def test_get_object_storage_clusters(test_linode_client): + client = test_linode_client + + clusters = client.object_storage.clusters() + + assert "us-east" in clusters[0].id + assert "us-east" in clusters[0].region.id + + +def test_get_keys(test_linode_client, access_keys_object_storage): + client = test_linode_client + key = access_keys_object_storage + + keys = client.object_storage.keys() + key_labels = [i.label for i in keys] + + assert key.label in key_labels + + +def test_keys_create(test_linode_client, access_keys_object_storage): + key = access_keys_object_storage + + assert type(key) == type( + ObjectStorageKeys(client=test_linode_client, id="123") + ) + + +# NetworkingGroupTests + + +@pytest.fixture +def create_firewall_with_inbound_outbound_rules(test_linode_client): + client = test_linode_client + label = get_test_label() + "-firewall" + rules = { + "outbound": [ + { + "ports": "22", + "protocol": "TCP", + "addresses": {"ipv4": ["198.0.0.2/32"]}, + "action": "ACCEPT", + "label": "accept-inbound-SSH", + } + ], + "outbound_policy": "DROP", + "inbound": [ + { + "ports": "22", + "protocol": "TCP", + "addresses": {"ipv4": ["198.0.0.2/32"]}, + "action": "ACCEPT", + "label": "accept-inbound-SSH", + } + ], + "inbound_policy": "ACCEPT", + } + + firewall = client.networking.firewall_create( + label, rules=rules, 
status="enabled" + ) + + yield firewall + + firewall.delete() + + +def test_get_firewalls_with_inbound_outbound_rules( + test_linode_client, create_firewall_with_inbound_outbound_rules +): + client = test_linode_client + firewalls = client.networking.firewalls() + firewall = create_firewall_with_inbound_outbound_rules + + firewall_labels = [i.label for i in firewalls] + + assert firewall.label in firewall_labels + assert firewall.rules.inbound_policy == "ACCEPT" + assert firewall.rules.outbound_policy == "DROP" diff --git a/test/integration/linode_client/test_retry.py b/test/integration/linode_client/test_retry.py new file mode 100644 index 000000000..a2a8e1b3c --- /dev/null +++ b/test/integration/linode_client/test_retry.py @@ -0,0 +1,177 @@ +from test.integration.conftest import get_token + +import httpretty +import pytest + +from linode_api4 import ApiError, LinodeClient + +""" +Tests for retrying on intermittent errors. + +.. warning:: + This test class _does not_ follow normal testing conventions for this project, + as requests are not automatically mocked. Only add tests to this class if they + pertain to the retry logic, and make sure you mock the requests calls yourself + (or else they will make real requests and those won't work). +""" +ERROR_RESPONSES = [ + httpretty.Response( + body="{}", + status=408, + ), + httpretty.Response( + body="{}", + status=429, + ), + httpretty.Response( + body="{}", + status=200, + ), +] + + +def get_retry_client(): + client = LinodeClient(token=get_token(), base_url="https://localhost") + # sidestep the validation to do immediate retries so tests aren't slow + client.retry_rate_limit_interval = 0.1 + return client + + +@pytest.mark.smoke +@httpretty.activate +def test_get_retry_statuses(): + """ + Tests that retries work as expected on 408 and 429 responses. 
+ """ + + httpretty.register_uri( + httpretty.GET, "https://localhost/test", responses=ERROR_RESPONSES + ) + + get_retry_client().get("/test") + + assert len(httpretty.latest_requests()) == 3 + + +@httpretty.activate +def test_put_retry_statuses(): + """ + Tests that retries work as expected on 408 and 429 responses. + """ + + httpretty.register_uri( + httpretty.PUT, "https://localhost/test", responses=ERROR_RESPONSES + ) + + get_retry_client().put("/test") + + assert len(httpretty.latest_requests()) == 3 + + +@httpretty.activate +def test_post_retry_statuses(): + httpretty.register_uri( + httpretty.POST, "https://localhost/test", responses=ERROR_RESPONSES + ) + + get_retry_client().post("/test") + + assert len(httpretty.latest_requests()) == 3 + + +@httpretty.activate +def test_delete_retry_statuses(): + httpretty.register_uri( + httpretty.DELETE, "https://localhost/test", responses=ERROR_RESPONSES + ) + + get_retry_client().delete("/test") + + assert len(httpretty.latest_requests()) == 3 + + +@httpretty.activate +def test_retry_max(): + """ + Tests that retries work as expected on 408 and 429 responses. + """ + + httpretty.register_uri( + httpretty.GET, + "https://localhost/test", + responses=[ + httpretty.Response( + body="{}", + status=408, + ), + httpretty.Response( + body="{}", + status=429, + ), + httpretty.Response( + body="{}", + status=429, + ), + ], + ) + + client = get_retry_client() + client.retry_max = 2 + + try: + client.get("/test") + except ApiError as err: + assert err.status == 429 + else: + raise RuntimeError("Expected retry error after exceeding max retries") + + assert len(httpretty.latest_requests()) == 3 + + +@httpretty.activate +def test_retry_disable(): + """ + Tests that retries can be disabled. 
+ """ + + httpretty.register_uri( + httpretty.GET, + "https://localhost/test", + responses=[ + httpretty.Response( + body="{}", + status=408, + ), + ], + ) + + client = get_retry_client() + client.retry = False + + try: + client.get("/test") + except ApiError as e: + assert e.status == 408 + else: + raise RuntimeError("Expected 408 error to be raised") + + assert len(httpretty.latest_requests()) == 1 + + +@httpretty.activate +def test_retry_works_with_integer_interval_value(): + """ + Tests that retries work as expected on 408 and 429 responses. + """ + + httpretty.register_uri( + httpretty.GET, "https://localhost/test", responses=ERROR_RESPONSES + ) + + client = get_retry_client() + client.retry_max = 2 + client.retry_rate_limit_interval = 1 + + client.get("/test") + + assert len(httpretty.latest_requests()) == 3 diff --git a/test/integration/login_client/test_login_client.py b/test/integration/login_client/test_login_client.py new file mode 100644 index 000000000..24519346c --- /dev/null +++ b/test/integration/login_client/test_login_client.py @@ -0,0 +1,106 @@ +import pytest + +from linode_api4 import OAuthScopes +from linode_api4.login_client import LinodeLoginClient +from linode_api4.objects import OAuthClient + + +@pytest.fixture +def linode_login_client(test_oauth_client): + client_id = test_oauth_client.id + client_secret = test_oauth_client.secret + + login_client = LinodeLoginClient(client_id, client_secret) + + yield login_client + + +@pytest.fixture +def test_oauth_client_two(test_linode_client): + client = test_linode_client + oauth_client = client.account.oauth_client_create( + "test-oauth-client-two", "https://localhost/oauth/callback" + ) + + yield oauth_client + + oauth_client.delete() + + +@pytest.mark.smoke +def test_get_oathclient(test_linode_client, test_oauth_client): + client = test_linode_client + + oauth_client = client.load(OAuthClient, test_oauth_client.id) + + assert "_oauth" in test_oauth_client.label + assert 
"https://localhost/oauth/callback" == oauth_client.redirect_uri + + +def test_get_oauth_clients( + test_linode_client, test_oauth_client, test_oauth_client_two +): + oauth_clients = test_linode_client.account.oauth_clients() + + id_list = [o_cli.id for o_cli in oauth_clients] + + assert str(test_oauth_client.id) in id_list + assert str(test_oauth_client_two.id) in id_list + + +def test_get_oauth_clients_dont_reveal_secret( + test_linode_client, test_oauth_client +): + oauth_client_secret = test_linode_client.account.oauth_clients()[0].secret + + assert oauth_client_secret == "" + + +def test_edit_oauth_client_details(test_linode_client, test_oauth_client_two): + test_oauth_client_two.redirect_uri = ( + "https://localhost/oauth/callback_changed" + ) + test_oauth_client_two.label = "new_oauthclient_label" + test_oauth_client_two.save() + + oau_client = test_linode_client.load(OAuthClient, test_oauth_client_two.id) + + assert oau_client.redirect_uri == "https://localhost/oauth/callback_changed" + assert oau_client.label == "new_oauthclient_label" + + +def test_oauth_client_reset_secrets(test_oauth_client_two): + old_secret = test_oauth_client_two.secret + + new_secret = test_oauth_client_two.reset_secret() + + assert old_secret != new_secret + + +def test_linode_login_client_generate_default_login_url(linode_login_client): + client_id = linode_login_client.client_id + url = linode_login_client.generate_login_url() + + assert ( + "https://login.linode.com/oauth/authorize?client_id=" + + str(client_id) + + "&response_type=code" + == url + ) + + +def test_linode_login_client_generate_login_url_with_scope(linode_login_client): + url = linode_login_client.generate_login_url( + scopes=OAuthScopes.Linodes.read_write + ) + + assert "scopes=linodes%3Aread_write" in url + + +@pytest.mark.skip("Endpoint may be deprecated") +def test_linode_login_client_expire_token( + linode_login_client, test_oauth_client +): + result = 
linode_login_client.expire_token(token=test_oauth_client.secret) + + assert result is True diff --git a/test/integration/models/__init__.py b/test/integration/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/integration/models/account/test_account.py b/test/integration/models/account/test_account.py new file mode 100644 index 000000000..4c4dcc134 --- /dev/null +++ b/test/integration/models/account/test_account.py @@ -0,0 +1,169 @@ +import time +from datetime import datetime +from test.integration.conftest import get_region +from test.integration.helpers import ( + get_test_label, + retry_sending_request, + wait_for_condition, +) + +import pytest + +from linode_api4.objects import ( + Account, + AccountSettings, + ChildAccount, + Event, + Login, + User, +) + + +@pytest.mark.smoke +def test_get_account(test_linode_client): + client = test_linode_client + account = client.account() + account_id = account.id + account_get = client.load(Account, account_id) + + assert account_get.first_name == account.first_name + assert account_get.last_name == account.last_name + assert account_get.email == account.email + assert account_get.phone == account.phone + assert account_get.address_1 == account.address_1 + assert account_get.address_2 == account.address_2 + assert account_get.city == account.city + assert account_get.state == account.state + assert account_get.country == account.country + assert account_get.zip == account.zip + assert account_get.tax_id == account.tax_id + + +def test_get_login(test_linode_client): + client = test_linode_client + login = retry_sending_request(3, client.load, Login(client, "", {}), "") + + updated_time = int(time.mktime(getattr(login, "_last_updated").timetuple())) + + login_updated = int(time.time()) - updated_time + + assert "username" in str(login._raw_json) + assert "ip" in str(login._raw_json) + assert "datetime" in str(login._raw_json) + assert "status" in str(login._raw_json) + assert login_updated 
< 15 + + +def test_get_account_settings(test_linode_client): + client = test_linode_client + account_settings = client.load(AccountSettings(client, ""), "") + + assert "managed" in str(account_settings._raw_json) + assert "network_helper" in str(account_settings._raw_json) + assert "longview_subscription" in str(account_settings._raw_json) + assert "backups_enabled" in str(account_settings._raw_json) + assert "object_storage" in str(account_settings._raw_json) + assert isinstance(account_settings.interfaces_for_new_linodes, str) + assert "maintenance_policy" in str(account_settings._raw_json) + + +def test_update_maintenance_policy(test_linode_client): + client = test_linode_client + settings = client.load(AccountSettings(client, ""), "") + + original_policy = settings.maintenance_policy + new_policy = ( + "linode/power_off_on" + if original_policy == "linode/migrate" + else "linode/migrate" + ) + + settings.maintenance_policy = new_policy + settings.save() + + updated = client.load(AccountSettings(client, ""), "") + assert updated.maintenance_policy == new_policy + + settings.maintenance_policy = original_policy + settings.save() + + updated = client.load(AccountSettings(client, ""), "") + assert updated.maintenance_policy == original_policy + + +@pytest.mark.smoke +def test_latest_get_event(test_linode_client, e2e_test_firewall): + client = test_linode_client + + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + + linode, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + def get_linode_status(): + return linode.status == "running" + + # To ensure the Linode is running and the 'event' key has been populated + wait_for_condition(3, 100, get_linode_status) + + events = client.load(Event, "") + latest_events = events._raw_json.get("data")[:15] + + linode.delete() + + for event in latest_events: + if label == 
event["entity"]["label"]: + break + else: + assert False, f"Linode '{label}' not found in the last 15 events" + + +def test_get_user(test_linode_client): + client = test_linode_client + + events = client.load(Event, "") + + username = events._raw_json.get("data")[0]["username"] + + user = client.load(User, username) + + assert username == user.username + assert "email" in user._raw_json + + +def test_list_child_accounts(test_linode_client): + pytest.skip("Configure test account settings for Parent child") + client = test_linode_client + child_accounts = client.account.child_accounts() + if len(child_accounts) > 0: + child_account = ChildAccount(client, child_accounts[0].euuid) + child_account._api_get() + child_account.create_token() + + +def test_get_invoice(test_linode_client): + client = test_linode_client + + invoices = client.account.invoices() + + if len(invoices) > 0: + assert isinstance(invoices[0].subtotal, float) + assert isinstance(invoices[0].tax, float) + assert isinstance(invoices[0].total, float) + assert r"'billing_source': 'linode'" in str(invoices[0]._raw_json) + + +def test_get_payments(test_linode_client): + client = test_linode_client + + payments = client.account.payments() + + if len(payments) > 0: + assert isinstance(payments[0].date, datetime) + assert isinstance(payments[0].usd, float) diff --git a/test/integration/models/database/helpers.py b/test/integration/models/database/helpers.py new file mode 100644 index 000000000..134e7e7c2 --- /dev/null +++ b/test/integration/models/database/helpers.py @@ -0,0 +1,132 @@ +from linode_api4 import LinodeClient +from linode_api4.objects import ( + MySQLDatabase, + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) + + +# Test Helpers +def get_db_engine_id(client: LinodeClient, engine: str): + engines = client.database.engines() + engine_id = "" + for e in engines: + if e.engine == engine: + 
engine_id = e.id + + return str(engine_id) + + +def get_sql_db_status(client: LinodeClient, db_id, status: str): + db = client.load(MySQLDatabase, db_id) + return db.status == status + + +def get_postgres_db_status(client: LinodeClient, db_id, status: str): + db = client.load(PostgreSQLDatabase, db_id) + return db.status == status + + +def make_full_mysql_engine_config(): + return MySQLDatabaseConfigOptions( + binlog_retention_period=600, + mysql=MySQLDatabaseConfigMySQLOptions( + connect_timeout=20, + default_time_zone="+00:00", + group_concat_max_len=1024, + information_schema_stats_expiry=900, + innodb_change_buffer_max_size=25, + innodb_flush_neighbors=1, + innodb_ft_min_token_size=3, + innodb_ft_server_stopword_table="db_name/table_name", + innodb_lock_wait_timeout=50, + innodb_log_buffer_size=16777216, + innodb_online_alter_log_max_size=134217728, + innodb_read_io_threads=4, + innodb_rollback_on_timeout=True, + innodb_thread_concurrency=8, + innodb_write_io_threads=4, + interactive_timeout=300, + internal_tmp_mem_storage_engine="TempTable", + max_allowed_packet=67108864, + max_heap_table_size=16777216, + net_buffer_length=16384, + net_read_timeout=30, + net_write_timeout=60, + sort_buffer_size=262144, + sql_mode="TRADITIONAL", + sql_require_primary_key=False, + tmp_table_size=16777216, + wait_timeout=28800, + ), + ) + + +def make_mysql_engine_config_w_nullable_field(): + return MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions( + innodb_ft_server_stopword_table=None, + ), + ) + + +def make_full_postgres_engine_config(): + return PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.1, + autovacuum_analyze_threshold=50, + autovacuum_max_workers=3, + autovacuum_naptime=60, + autovacuum_vacuum_cost_delay=20, + autovacuum_vacuum_cost_limit=200, + autovacuum_vacuum_scale_factor=0.2, + autovacuum_vacuum_threshold=50, + bgwriter_delay=200, + bgwriter_flush_after=64, + bgwriter_lru_maxpages=100, 
+ bgwriter_lru_multiplier=2.0, + deadlock_timeout=1000, + default_toast_compression="lz4", + idle_in_transaction_session_timeout=600000, + jit=True, + max_files_per_process=1000, + max_locks_per_transaction=64, + max_logical_replication_workers=4, + max_parallel_workers=4, + max_parallel_workers_per_gather=2, + max_pred_locks_per_transaction=64, + max_replication_slots=10, + max_slot_wal_keep_size=2048, + max_stack_depth=6291456, + max_standby_archive_delay=30000, + max_standby_streaming_delay=30000, + max_wal_senders=20, + max_worker_processes=8, + password_encryption="scram-sha-256", + temp_file_limit=1, + timezone="UTC", + track_activity_query_size=2048, + track_functions="all", + wal_sender_timeout=60000, + wal_writer_delay=200, + pg_partman_bgw_interval=3600, + pg_partman_bgw_role="myrolename", + pg_stat_monitor_pgsm_enable_query_plan=True, + pg_stat_monitor_pgsm_max_buckets=2, + pg_stat_statements_track="top", + ), + pg_stat_monitor_enable=True, + shared_buffers_percentage=25.0, + work_mem=1024, + ) + + +def make_postgres_engine_config_w_password_encryption_null(): + return PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + password_encryption=None, + ), + ) diff --git a/test/integration/models/database/test_database.py b/test/integration/models/database/test_database.py new file mode 100644 index 000000000..7092eca06 --- /dev/null +++ b/test/integration/models/database/test_database.py @@ -0,0 +1,470 @@ +import os +import time +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, + wait_for_condition, +) +from test.integration.models.database.helpers import ( + get_db_engine_id, + get_postgres_db_status, + get_sql_db_status, +) + +import pytest + +from linode_api4.objects import MySQLDatabase, PostgreSQLDatabase + + +@pytest.fixture(scope="session") +def test_create_sql_db(test_linode_client): + client = test_linode_client + label = get_test_label() + "-sqldb" + region = "us-ord" + engine_id 
= get_db_engine_id(client, "mysql") + dbtype = "g6-standard-1" + + db = client.database.mysql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + ) + + def get_db_status(): + return db.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +@pytest.fixture(scope="session") +def test_create_postgres_db(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "postgresql") + dbtype = "g6-standard-1" + + db = client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + ) + + def get_db_status(): + return db.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +@pytest.mark.skipif( + os.getenv("RUN_DB_FORK_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_FORK_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_fork_sql_db(test_linode_client, test_create_sql_db): + client = test_linode_client + db_fork = client.database.mysql_fork( + test_create_sql_db.id, test_create_sql_db.updated + ) + + def get_db_fork_status(): + return db_fork.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + wait_for_condition(60, 2000, get_db_fork_status) + + assert db_fork.fork.source == test_create_sql_db.id + + db_fork.delete() + + +@pytest.mark.skipif( + os.getenv("RUN_DB_FORK_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_FORK_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_fork_postgres_db(test_linode_client, test_create_postgres_db): + client = test_linode_client + db_fork = 
client.database.postgresql_fork( + test_create_postgres_db.id, test_create_postgres_db.updated + ) + + def get_db_fork_status(): + return db_fork.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + wait_for_condition(60, 2000, get_db_fork_status) + + assert db_fork.fork.source == test_create_postgres_db.id + + db_fork.delete() + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_types(test_linode_client): + client = test_linode_client + types = client.database.types() + + assert "nanode" in types[0].type_class + assert "g6-nanode-1" in types[0].id + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_engines(test_linode_client): + client = test_linode_client + engines = client.database.engines() + + for e in engines: + assert e.engine in ["mysql", "postgresql"] + # assert re.search("[0-9]+.[0-9]+", e.version) + assert e.id == e.engine + "/" + e.version + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_database_instance(test_linode_client, test_create_sql_db): + dbs = test_linode_client.database.mysql_instances() + + assert str(test_create_sql_db.id) in str(dbs.lists) + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_mysql_suspend_resume(test_linode_client, test_create_sql_db): + db = test_linode_client.load(MySQLDatabase, test_create_sql_db.id) + + db.suspend() + + wait_for_condition( + 10, + 300, + get_sql_db_status, + 
test_linode_client, + test_create_sql_db.id, + "suspended", + ) + + assert db.status == "suspended" + + db.resume() + + wait_for_condition( + 30, + 600, + get_sql_db_status, + test_linode_client, + test_create_sql_db.id, + "active", + ) + + assert db.status == "active" + + +# ------- MYSQL DB Test cases ------- +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_sql_db_instance(test_linode_client, test_create_sql_db): + dbs = test_linode_client.database.mysql_instances() + database = "" + for db in dbs: + if db.id == test_create_sql_db.id: + database = db + + assert str(test_create_sql_db.id) == str(database.id) + assert str(test_create_sql_db.label) == str(database.label) + assert database.cluster_size == 1 + assert database.engine == "mysql" + assert ".g2a.akamaidb.net" in database.hosts.primary + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_update_sql_db(test_linode_client, test_create_sql_db): + db = test_linode_client.load(MySQLDatabase, test_create_sql_db.id) + + new_allow_list = ["192.168.0.1/32"] + label = get_test_label() + "updatedSQLDB" + + db.allow_list = new_allow_list + db.updates.day_of_week = 2 + db.label = label + + res = db.save() + + wait_for_condition( + 30, + 300, + get_sql_db_status, + test_linode_client, + test_create_sql_db.id, + "active", + ) + + database = test_linode_client.load(MySQLDatabase, test_create_sql_db.id) + + assert res + assert database.allow_list == new_allow_list + # Label assertion is commented out because the API updates + # the label intermittently, causing test failures. The issue + # is tracked in TPT-4268. 
+ # assert database.label == label + assert database.updates.day_of_week == 2 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_sql_ssl(test_linode_client, test_create_sql_db): + db = test_linode_client.load(MySQLDatabase, test_create_sql_db.id) + + assert "ca_certificate" in str(db.ssl) + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_sql_patch(test_linode_client, test_create_sql_db): + db = test_linode_client.load(MySQLDatabase, test_create_sql_db.id) + + db.patch() + + wait_for_condition( + 10, + 300, + get_sql_db_status, + test_linode_client, + test_create_sql_db.id, + "updating", + ) + + assert db.status == "updating" + + wait_for_condition( + 30, + 1000, + get_sql_db_status, + test_linode_client, + test_create_sql_db.id, + "active", + ) + + assert db.status == "active" + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_sql_credentials(test_linode_client, test_create_sql_db): + db = test_linode_client.load(MySQLDatabase, test_create_sql_db.id) + + assert db.credentials.username == "akmadmin" + assert db.credentials.password + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_reset_sql_credentials(test_linode_client, test_create_sql_db): + db = test_linode_client.load(MySQLDatabase, test_create_sql_db.id) + + old_pass = str(db.credentials.password) + db.credentials_reset() + + time.sleep(5) + assert db.credentials.username == "akmadmin" + 
assert db.credentials.password != old_pass + + +# ------- POSTGRESQL DB Test cases ------- +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_db_instance(test_linode_client, test_create_postgres_db): + dbs = test_linode_client.database.postgresql_instances() + + database = None + + for db in dbs: + if db.id == test_create_postgres_db.id: + database = db + + assert str(test_create_postgres_db.id) == str(database.id) + assert str(test_create_postgres_db.label) == str(database.label) + assert database.cluster_size == 1 + assert database.engine == "postgresql" + assert "g2a.akamaidb.net" in database.hosts.primary + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_update_postgres_db(test_linode_client, test_create_postgres_db): + db = test_linode_client.load(PostgreSQLDatabase, test_create_postgres_db.id) + + new_allow_list = ["192.168.0.1/32"] + label = get_test_label() + "updatedPostgresDB" + + db.allow_list = new_allow_list + db.updates.day_of_week = 2 + db.label = label + + res = db.save() + + wait_for_condition( + 30, + 1000, + get_postgres_db_status, + test_linode_client, + test_create_postgres_db.id, + "active", + ) + + database = test_linode_client.load( + PostgreSQLDatabase, test_create_postgres_db.id + ) + + assert res + assert database.allow_list == new_allow_list + # Label assertion is commented out because the API updates + # the label intermittently, causing test failures. The issue + # is tracked in TPT-4268. 
+ # assert database.label == label + assert database.updates.day_of_week == 2 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_ssl(test_linode_client, test_create_postgres_db): + db = test_linode_client.load(PostgreSQLDatabase, test_create_postgres_db.id) + + assert "ca_certificate" in str(db.ssl) + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_postgres_patch(test_linode_client, test_create_postgres_db): + db = test_linode_client.load(PostgreSQLDatabase, test_create_postgres_db.id) + + db.patch() + + wait_for_condition( + 10, + 300, + get_postgres_db_status, + test_linode_client, + test_create_postgres_db.id, + "updating", + ) + + assert db.status == "updating" + + wait_for_condition( + 30, + 600, + get_postgres_db_status, + test_linode_client, + test_create_postgres_db.id, + "active", + ) + + assert db.status == "active" + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_credentials(test_linode_client, test_create_postgres_db): + db = test_linode_client.load(PostgreSQLDatabase, test_create_postgres_db.id) + + assert db.credentials.username == "akmadmin" + assert db.credentials.password + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_reset_postgres_credentials( + test_linode_client, test_create_postgres_db +): + db = test_linode_client.load(PostgreSQLDatabase, test_create_postgres_db.id) + + old_pass = 
str(db.credentials.password) + + db.credentials_reset() + + time.sleep(5) + + assert db.credentials.username == "akmadmin" + assert db.credentials.password != old_pass + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_postgres_suspend_resume(test_linode_client, test_create_postgres_db): + db = test_linode_client.load(PostgreSQLDatabase, test_create_postgres_db.id) + + db.suspend() + + wait_for_condition( + 10, + 300, + get_postgres_db_status, + test_linode_client, + test_create_postgres_db.id, + "suspended", + ) + + assert db.status == "suspended" + + db.resume() + + wait_for_condition( + 30, + 600, + get_postgres_db_status, + test_linode_client, + test_create_postgres_db.id, + "active", + ) + + assert db.status == "active" diff --git a/test/integration/models/database/test_database_engine_config.py b/test/integration/models/database/test_database_engine_config.py new file mode 100644 index 000000000..184b63522 --- /dev/null +++ b/test/integration/models/database/test_database_engine_config.py @@ -0,0 +1,475 @@ +import os +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, + wait_for_condition, +) +from test.integration.models.database.helpers import ( + get_db_engine_id, + get_postgres_db_status, + get_sql_db_status, + make_full_mysql_engine_config, + make_full_postgres_engine_config, + make_mysql_engine_config_w_nullable_field, + make_postgres_engine_config_w_password_encryption_null, +) + +import pytest + +from linode_api4.errors import ApiError +from linode_api4.objects import ( + MySQLDatabase, + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) + + +@pytest.fixture(scope="session") +def mysql_db_with_engine_config(test_linode_client): + client = 
test_linode_client + label = get_test_label() + "-sqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "mysql") + dbtype = "g6-standard-1" + + db = client.database.mysql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=make_full_mysql_engine_config(), + ) + + def get_db_status(): + return db.status == "active" + + # Usually take 10-15m to provision + wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +@pytest.fixture(scope="session") +def postgres_db_with_engine_config(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = "postgresql/17" + dbtype = "g6-standard-1" + + db = client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=make_full_postgres_engine_config(), + ) + + def get_db_status(): + return db.status == "active" + + # Usually take 10-15m to provision + wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +# MYSQL +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_mysql_config(test_linode_client): + config = test_linode_client.database.mysql_config_options() + + # Top-level keys + assert "binlog_retention_period" in config + assert "mysql" in config + + # binlog_retention_period checks + brp = config["binlog_retention_period"] + assert isinstance(brp, dict) + assert brp["type"] == "integer" + assert brp["minimum"] == 600 + assert brp["maximum"] == 9007199254740991 + assert brp["requires_restart"] is False + + # mysql sub-keys + mysql = config["mysql"] + + # mysql valid fields + expected_keys = [ + "connect_timeout", + "default_time_zone", + 
"group_concat_max_len", + "information_schema_stats_expiry", + "innodb_change_buffer_max_size", + "innodb_flush_neighbors", + "innodb_ft_min_token_size", + "innodb_ft_server_stopword_table", + "innodb_lock_wait_timeout", + "innodb_log_buffer_size", + "innodb_online_alter_log_max_size", + "innodb_read_io_threads", + "innodb_rollback_on_timeout", + "innodb_thread_concurrency", + "innodb_write_io_threads", + "interactive_timeout", + "internal_tmp_mem_storage_engine", + "max_allowed_packet", + "max_heap_table_size", + "net_buffer_length", + "net_read_timeout", + "net_write_timeout", + "sort_buffer_size", + "sql_mode", + "sql_require_primary_key", + "tmp_table_size", + "wait_timeout", + ] + + # Assert all valid fields are present + for key in expected_keys: + assert key in mysql, f"{key} not found in mysql config" + + assert mysql["connect_timeout"]["type"] == "integer" + assert mysql["default_time_zone"]["type"] == "string" + assert mysql["innodb_rollback_on_timeout"]["type"] == "boolean" + assert "enum" in mysql["internal_tmp_mem_storage_engine"] + assert "pattern" in mysql["sql_mode"] + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_mysql_with_engine_config(mysql_db_with_engine_config): + db = mysql_db_with_engine_config + actual_config = db.engine_config.mysql + expected_config = make_full_mysql_engine_config().mysql.__dict__ + + for key, expected_value in expected_config.items(): + actual_value = getattr(actual_config, key) + assert ( + actual_value == expected_value + ), f"{key} mismatch: expected {expected_value}, got {actual_value}" + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_update_mysql_engine_config( + test_linode_client, 
mysql_db_with_engine_config +): + db = mysql_db_with_engine_config + + db.updates.day_of_week = 2 + db.engine_config = MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions(connect_timeout=50), + binlog_retention_period=880, + ) + + db.save() + + wait_for_condition( + 30, + 300, + get_sql_db_status, + test_linode_client, + db.id, + "active", + ) + + database = test_linode_client.load(MySQLDatabase, db.id) + + assert database.updates.day_of_week == 2 + assert database.engine_config.mysql.connect_timeout == 50 + assert database.engine_config.binlog_retention_period == 880 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_list_mysql_engine_config( + test_linode_client, mysql_db_with_engine_config +): + dbs = test_linode_client.database.mysql_instances() + + db_ids = [db.id for db in dbs] + + assert mysql_db_with_engine_config.id in db_ids + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_mysql_engine_config( + test_linode_client, mysql_db_with_engine_config +): + db = test_linode_client.load(MySQLDatabase, mysql_db_with_engine_config.id) + + assert isinstance(db, MySQLDatabase) + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_mysql_db_nullable_field(test_linode_client): + client = test_linode_client + label = get_test_label(5) + "-sqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "mysql") + dbtype = "g6-standard-1" + + db = client.database.mysql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + 
engine_config=make_mysql_engine_config_w_nullable_field(), + ) + + assert db.engine_config.mysql.innodb_ft_server_stopword_table is None + + send_request_when_resource_available(300, db.delete) + + +# POSTGRESQL +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_config(test_linode_client): + config = test_linode_client.database.postgresql_config_options() + + # Top-level keys and structure + assert "pg" in config + + assert "pg_stat_monitor_enable" in config + assert config["pg_stat_monitor_enable"]["type"] == "boolean" + + assert "shared_buffers_percentage" in config + assert config["shared_buffers_percentage"]["type"] == "number" + assert config["shared_buffers_percentage"]["minimum"] >= 1 + + assert "work_mem" in config + assert config["work_mem"]["type"] == "integer" + assert "minimum" in config["work_mem"] + + pg = config["pg"] + + # postgres valid fields + expected_keys = [ + "autovacuum_analyze_scale_factor", + "autovacuum_analyze_threshold", + "autovacuum_max_workers", + "autovacuum_naptime", + "autovacuum_vacuum_cost_delay", + "autovacuum_vacuum_cost_limit", + "autovacuum_vacuum_scale_factor", + "autovacuum_vacuum_threshold", + "bgwriter_delay", + "bgwriter_flush_after", + "bgwriter_lru_maxpages", + "bgwriter_lru_multiplier", + "deadlock_timeout", + "default_toast_compression", + "idle_in_transaction_session_timeout", + "jit", + "max_files_per_process", + "max_locks_per_transaction", + "max_logical_replication_workers", + "max_parallel_workers", + "max_parallel_workers_per_gather", + "max_pred_locks_per_transaction", + "max_replication_slots", + "max_slot_wal_keep_size", + "max_stack_depth", + "max_standby_archive_delay", + "max_standby_streaming_delay", + "max_wal_senders", + "max_worker_processes", + "password_encryption", + "pg_partman_bgw.interval", + "pg_partman_bgw.role", + 
"pg_stat_monitor.pgsm_enable_query_plan", + "pg_stat_monitor.pgsm_max_buckets", + "pg_stat_statements.track", + "temp_file_limit", + "timezone", + "track_activity_query_size", + "track_commit_timestamp", + "track_functions", + "track_io_timing", + "wal_sender_timeout", + "wal_writer_delay", + ] + + # Assert all valid fields are present + for key in expected_keys: + assert key in pg, f"{key} not found in postgresql config" + + assert pg["autovacuum_analyze_scale_factor"]["type"] == "number" + assert pg["autovacuum_analyze_threshold"]["type"] == "integer" + assert pg["autovacuum_max_workers"]["requires_restart"] is True + assert pg["default_toast_compression"]["enum"] == ["lz4", "pglz"] + assert pg["jit"]["type"] == "boolean" + assert "enum" in pg["password_encryption"] + assert "pattern" in pg["pg_partman_bgw.role"] + assert pg["pg_stat_monitor.pgsm_enable_query_plan"]["type"] == "boolean" + assert pg["pg_stat_monitor.pgsm_max_buckets"]["requires_restart"] is True + assert pg["pg_stat_statements.track"]["enum"] == ["all", "top", "none"] + assert pg["track_commit_timestamp"]["enum"] == ["off", "on"] + assert pg["track_functions"]["enum"] == ["all", "pl", "none"] + assert pg["track_io_timing"]["enum"] == ["off", "on"] + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_postgres_with_engine_config( + test_linode_client, postgres_db_with_engine_config +): + db = postgres_db_with_engine_config + actual_config = db.engine_config.pg + expected_config = make_full_postgres_engine_config().pg.__dict__ + + for key, expected_value in expected_config.items(): + actual_value = getattr(actual_config, key, None) + assert ( + actual_value is None or actual_value == expected_value + ), f"{key} mismatch: expected {expected_value}, got {actual_value}" + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() 
not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_update_postgres_engine_config( + test_linode_client, postgres_db_with_engine_config +): + db = postgres_db_with_engine_config + + db.updates.day_of_week = 2 + db.engine_config = PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_threshold=70, deadlock_timeout=2000 + ), + shared_buffers_percentage=25.0, + ) + + db.save() + + wait_for_condition( + 30, + 300, + get_postgres_db_status, + test_linode_client, + db.id, + "active", + ) + + database = test_linode_client.load(PostgreSQLDatabase, db.id) + + assert database.updates.day_of_week == 2 + assert database.engine_config.pg.autovacuum_analyze_threshold == 70 + assert database.engine_config.pg.deadlock_timeout == 2000 + assert database.engine_config.shared_buffers_percentage == 25.0 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_pg13_with_lz4_error(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "postgresql/13") + dbtype = "g6-standard-1" + + try: + client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + default_toast_compression="lz4" + ), + work_mem=4, + ), + ) + except ApiError as e: + assert "An error occurred" in str(e.json) + assert e.status == 500 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_list_postgres_engine_config( + test_linode_client, 
postgres_db_with_engine_config +): + dbs = test_linode_client.database.postgresql_instances() + + db_ids = [db.id for db in dbs] + + assert postgres_db_with_engine_config.id in db_ids + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_engine_config( + test_linode_client, postgres_db_with_engine_config +): + db = test_linode_client.load( + PostgreSQLDatabase, postgres_db_with_engine_config.id + ) + + assert isinstance(db, PostgreSQLDatabase) + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_postgres_db_password_encryption_default_md5(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = "postgresql/17" + dbtype = "g6-standard-1" + + db = client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=make_postgres_engine_config_w_password_encryption_null(), + ) + + assert db.engine_config.pg.password_encryption == "md5" + + send_request_when_resource_available(300, db.delete) diff --git a/test/integration/models/domain/test_domain.py b/test/integration/models/domain/test_domain.py new file mode 100644 index 000000000..d7956d421 --- /dev/null +++ b/test/integration/models/domain/test_domain.py @@ -0,0 +1,59 @@ +import re +import time +from test.integration.helpers import wait_for_condition + +import pytest + +from linode_api4.objects import Domain, DomainRecord + + +@pytest.mark.smoke +def test_get_domain_record(test_linode_client, test_domain): + dr = DomainRecord( + test_linode_client, test_domain.records.first().id, test_domain.id + ) + + assert dr.id == test_domain.records.first().id + + 
+def test_save_null_values_excluded(test_linode_client, test_domain): + domain = test_linode_client.load(Domain, test_domain.id) + + domain.type = "master" + domain.master_ips = ["127.0.0.1"] + res = domain.save() + + +def test_zone_file_view(test_linode_client, test_domain): + domain = test_linode_client.load(Domain, test_domain.id) + + def get_zone_file_view(): + res = domain.zone_file_view() + return res != [] + + wait_for_condition(10, 100, get_zone_file_view) + + assert domain.domain in str(domain.zone_file_view()) + assert re.search("ns[0-9].linode.com", str(domain.zone_file_view())) + + +def test_clone(test_linode_client, test_domain): + domain = test_linode_client.load(Domain, test_domain.id) + timestamp = str(time.time_ns()) + dom = "example.clone-" + timestamp + "-inttestsdk.org" + domain.clone(dom) + + time.sleep(1) + + ds = test_linode_client.domains() + + domains = [i.domain for i in ds] + + assert dom in domains + + +def test_import(test_linode_client, test_domain): + pytest.skip( + 'Currently failing with message: linode_api4.errors.ApiError: 400: An unknown error occured. Please open a ticket for further assistance. 
Command: domain_import(domain, "google.ca")' + ) + domain = test_linode_client.load(Domain, test_domain.id) diff --git a/test/integration/models/firewall/test_firewall.py b/test/integration/models/firewall/test_firewall.py new file mode 100644 index 000000000..16805f3b8 --- /dev/null +++ b/test/integration/models/firewall/test_firewall.py @@ -0,0 +1,84 @@ +import time +from test.integration.conftest import get_region +from test.integration.helpers import get_test_label + +import pytest + +from linode_api4.objects import Firewall, FirewallDevice + + +@pytest.fixture(scope="session") +def linode_fw(test_linode_client): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", region, image="linode/debian12", label=label + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.mark.smoke +def test_get_firewall_rules(test_linode_client, test_firewall): + firewall = test_linode_client.load(Firewall, test_firewall.id) + rules = firewall.rules + + assert rules.inbound_policy in ["ACCEPT", "DROP"] + assert rules.outbound_policy in ["ACCEPT", "DROP"] + + +@pytest.mark.smoke +def test_update_firewall_rules(test_linode_client, test_firewall): + firewall = test_linode_client.load(Firewall, test_firewall.id) + new_rules = { + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": ["0.0.0.0/0"], + "ipv6": ["ff00::/8"], + }, + "description": "A really cool firewall rule.", + "label": "really-cool-firewall-rule", + "ports": "80", + "protocol": "TCP", + } + ], + "inbound_policy": "ACCEPT", + "outbound": [], + "outbound_policy": "DROP", + } + + firewall.update_rules(new_rules) + + time.sleep(1) + + firewall = test_linode_client.load(Firewall, test_firewall.id) + + assert firewall.rules.inbound_policy == "ACCEPT" + assert firewall.rules.outbound_policy == "DROP" + + +def 
test_get_devices(test_linode_client, linode_fw, test_firewall): + linode = linode_fw + + test_firewall.device_create(int(linode.id)) + + firewall = test_linode_client.load(Firewall, test_firewall.id) + + assert len(firewall.devices) > 0 + + +def test_get_device(test_linode_client, test_firewall, linode_fw): + firewall = test_firewall + + firewall_device = test_linode_client.load( + FirewallDevice, firewall.devices.first().id, firewall.id + ) + + assert firewall_device.entity.type == "linode" + assert "/v4/linode/instances/" in firewall_device.entity.url diff --git a/test/integration/models/firewall/test_firewall_templates.py b/test/integration/models/firewall/test_firewall_templates.py new file mode 100644 index 000000000..11d6ccb6f --- /dev/null +++ b/test/integration/models/firewall/test_firewall_templates.py @@ -0,0 +1,33 @@ +from linode_api4 import FirewallTemplate, MappedObject + + +def __assert_firewall_template_rules(rules: MappedObject): + # We can't confidently say that these rules will not be changed + # in the future, so we can just do basic assertions here. 
+ assert isinstance(rules.inbound_policy, str) + assert len(rules.inbound_policy) > 0 + + assert isinstance(rules.outbound_policy, str) + assert len(rules.outbound_policy) > 0 + + assert isinstance(rules.outbound, list) + assert isinstance(rules.inbound, list) + + +def test_list_firewall_templates(test_linode_client): + templates = test_linode_client.networking.firewall_templates() + assert len(templates) > 0 + + for template in templates: + assert isinstance(template.slug, str) + assert len(template.slug) > 0 + + __assert_firewall_template_rules(template.rules) + + +def test_get_firewall_template(test_linode_client): + template = test_linode_client.load(FirewallTemplate, "vpc") + + assert template.slug == "vpc" + + __assert_firewall_template_rules(template.rules) diff --git a/test/integration/models/image/test_image.py b/test/integration/models/image/test_image.py new file mode 100644 index 000000000..18e223ff0 --- /dev/null +++ b/test/integration/models/image/test_image.py @@ -0,0 +1,125 @@ +from io import BytesIO +from test.integration.conftest import get_regions +from test.integration.helpers import get_test_label + +import polling +import pytest + +from linode_api4 import LinodeClient +from linode_api4.objects import Image + +DISALLOWED_IMAGE_REGIONS = { + "gb-lon", + "au-mel", + "sg-sin-2", + "jp-tyo-3", + "no-osl-1", +} + + +def get_image_upload_regions(client: LinodeClient): + """ + This is necessary because the API does not currently expose + a capability for regions that allow custom image uploads. + + In the future, we should remove this if the API exposes a custom images capability or + if all Object Storage regions support custom images. 
+ """ + + return [ + region + for region in get_regions( + client, + capabilities={"Linodes", "Object Storage"}, + site_type="core", + ) + if region.id not in DISALLOWED_IMAGE_REGIONS + ] + + +@pytest.fixture(scope="session") +def image_upload_url(test_linode_client): + label = get_test_label() + "_image" + + region = get_image_upload_regions(test_linode_client)[0] + + test_linode_client.image_create_upload( + label, region.id, "integration test image upload" + ) + + image = test_linode_client.images()[0] + + yield image + + image.delete() + + +@pytest.fixture(scope="session") +def test_uploaded_image(test_linode_client): + test_image_content = ( + b"\x1f\x8b\x08\x08\xbd\x5c\x91\x60\x00\x03\x74\x65\x73\x74\x2e\x69" + b"\x6d\x67\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00" + ) + + label = get_test_label() + "_image" + + regions = get_image_upload_regions(test_linode_client) + + image = test_linode_client.image_upload( + label, + regions[1].id, + BytesIO(test_image_content), + description="integration test image upload", + tags=["tests"], + ) + + yield image, regions + + image.delete() + + +@pytest.mark.smoke +def test_get_image(test_linode_client, image_upload_url): + image = test_linode_client.load(Image, image_upload_url.id) + + assert image.label == image_upload_url.label + + +def test_image_create_upload(test_linode_client, test_uploaded_image): + uploaded_image, _ = test_uploaded_image + + image = test_linode_client.load(Image, uploaded_image.id) + + assert image.label == uploaded_image.label + assert image.description == "integration test image upload" + assert image.tags[0] == "tests" + + +@pytest.mark.smoke +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_image_replication(test_linode_client, test_uploaded_image): + uploaded_image, regions = test_uploaded_image + + image = test_linode_client.load(Image, uploaded_image.id) + + # wait for image to be available for replication + def poll_func() -> bool: + image._api_get() + return image.status in 
{"available"} + + try: + polling.poll( + poll_func, + step=10, + timeout=250, + ) + except polling.TimeoutException: + print("failed to wait for image status: timeout period expired.") + + replicate_regions = [r.id for r in regions[:2]] + image.replicate(replicate_regions) + + assert image.label == uploaded_image.label + assert len(image.regions) == 2 + assert image.regions[0].region in replicate_regions + assert image.regions[1].region in replicate_regions diff --git a/test/integration/models/linode/interfaces/test_interfaces.py b/test/integration/models/linode/interfaces/test_interfaces.py new file mode 100644 index 000000000..650a9cb6c --- /dev/null +++ b/test/integration/models/linode/interfaces/test_interfaces.py @@ -0,0 +1,361 @@ +import copy +import ipaddress + +import pytest + +from linode_api4 import ( + ApiError, + Instance, + LinodeInterface, + LinodeInterfaceDefaultRouteOptions, + LinodeInterfacePublicIPv4AddressOptions, + LinodeInterfacePublicIPv4Options, + LinodeInterfacePublicIPv6Options, + LinodeInterfacePublicIPv6RangeOptions, + LinodeInterfacePublicOptions, + LinodeInterfaceVLANOptions, + LinodeInterfaceVPCIPv4AddressOptions, + LinodeInterfaceVPCIPv4Options, + LinodeInterfaceVPCIPv4RangeOptions, + LinodeInterfaceVPCOptions, +) + + +def test_linode_create_with_linode_interfaces( + create_vpc_with_subnet, + linode_with_linode_interfaces, +): + instance: Instance = linode_with_linode_interfaces + vpc, subnet = create_vpc_with_subnet + + def __assert_base(iface: LinodeInterface): + assert iface.id is not None + assert iface.linode_id == instance.id + + assert iface.created is not None + assert iface.updated is not None + + assert isinstance(iface.mac_address, str) + assert iface.version + + def __assert_public(iface: LinodeInterface): + __assert_base(iface) + + assert iface.default_route.ipv4 + assert iface.default_route.ipv6 + + assert iface.public.ipv4.addresses[0].address == instance.ipv4[0] + assert iface.public.ipv4.addresses[0].primary + assert 
len(iface.public.ipv4.shared) == 0 + + assert iface.public.ipv6.slaac[0].address == instance.ipv6.split("/")[0] + assert iface.public.ipv6.slaac[0].prefix == 64 + assert len(iface.public.ipv6.shared) == 0 + assert len(iface.public.ipv6.ranges) == 0 + + def __assert_vpc(iface: LinodeInterface): + __assert_base(iface) + + assert not iface.default_route.ipv4 + assert not iface.default_route.ipv6 + + assert iface.vpc.vpc_id == vpc.id + assert iface.vpc.subnet_id == subnet.id + + assert ipaddress.ip_address( + iface.vpc.ipv4.addresses[0].address + ) in ipaddress.ip_network(subnet.ipv4) + assert iface.vpc.ipv4.addresses[0].primary + assert iface.vpc.ipv4.addresses[0].nat_1_1_address is None + + assert len(iface.vpc.ipv4.ranges) == 0 + + slaac_entry = iface.vpc.ipv6.slaac[0] + assert ipaddress.ip_address( + slaac_entry.address + ) in ipaddress.ip_network(slaac_entry.range) + assert not iface.vpc.ipv6.is_public + assert len(iface.vpc.ipv6.ranges) == 0 + + def __assert_vlan(iface: LinodeInterface): + __assert_base(iface) + + assert not iface.default_route.ipv4 + assert not iface.default_route.ipv6 + + assert iface.vlan.vlan_label == "test-vlan" + assert iface.vlan.ipam_address == "10.0.0.5/32" + + __assert_public(instance.linode_interfaces[0]) + __assert_vpc(instance.linode_interfaces[1]) + __assert_vlan(instance.linode_interfaces[2]) + + instance.invalidate() + + __assert_public(instance.linode_interfaces[0]) + __assert_vpc(instance.linode_interfaces[1]) + __assert_vlan(instance.linode_interfaces[2]) + + +@pytest.fixture +def linode_interface_public( + test_linode_client, + e2e_test_firewall, + linode_with_interface_generation_linode, +): + instance: Instance = linode_with_interface_generation_linode + + ipv6_range = test_linode_client.networking.ipv6_range_allocate( + 64, linode=instance.id + ) + + yield instance.interface_create( + firewall_id=e2e_test_firewall.id, + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True, + ipv6=True, + ), + 
public=LinodeInterfacePublicOptions( + ipv4=LinodeInterfacePublicIPv4Options( + addresses=[ + LinodeInterfacePublicIPv4AddressOptions( + address=instance.ips.ipv4.public[0].address, + primary=True, + ) + ] + ), + ipv6=LinodeInterfacePublicIPv6Options( + ranges=[ + LinodeInterfacePublicIPv6RangeOptions( + range=ipv6_range.range, + ) + ] + ), + ), + ), instance, ipv6_range + + +@pytest.fixture +def linode_interface_vpc( + test_linode_client, + e2e_test_firewall, + linode_with_interface_generation_linode, + create_vpc_with_subnet, +): + instance: Instance = linode_with_interface_generation_linode + vpc, subnet = create_vpc_with_subnet + + yield instance.interface_create( + firewall_id=e2e_test_firewall.id, + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True, + ), + vpc=LinodeInterfaceVPCOptions( + subnet_id=subnet.id, + ipv4=LinodeInterfaceVPCIPv4Options( + addresses=[ + LinodeInterfaceVPCIPv4AddressOptions( + address="auto", + primary=True, + nat_1_1_address=None, + ) + ], + ranges=[ + LinodeInterfaceVPCIPv4RangeOptions( + range="/32", + ) + ], + ), + ), + ), instance, vpc, subnet + + +@pytest.fixture +def linode_interface_vlan( + test_linode_client, + e2e_test_firewall, + linode_with_interface_generation_linode, + create_vpc_with_subnet, +): + instance: Instance = linode_with_interface_generation_linode + + yield instance.interface_create( + vlan=LinodeInterfaceVLANOptions( + vlan_label="test-vlan", ipam_address="10.0.0.5/32" + ), + ), instance + + +def test_linode_interface_create_public(linode_interface_public): + iface, instance, ipv6_range = linode_interface_public + + assert iface.id is not None + assert iface.linode_id == instance.id + + assert iface.created is not None + assert iface.updated is not None + + assert isinstance(iface.mac_address, str) + assert iface.version + + assert iface.default_route.ipv4 + assert iface.default_route.ipv6 + + assert ( + iface.public.ipv4.addresses[0].address + == instance.ips.ipv4.public[0].address + ) + assert 
iface.public.ipv4.addresses[0].primary + assert len(iface.public.ipv4.shared) == 0 + + assert iface.public.ipv6.ranges[0].range == ipv6_range.range + assert ( + iface.public.ipv6.ranges[0].route_target == instance.ipv6.split("/")[0] + ) + assert iface.public.ipv6.slaac[0].address == instance.ipv6.split("/")[0] + assert iface.public.ipv6.slaac[0].prefix == 64 + assert len(iface.public.ipv6.shared) == 0 + + +def test_linode_interface_update_public(linode_interface_public): + iface, instance, ipv6_range = linode_interface_public + + old_public_ipv4 = copy.deepcopy(iface.public.ipv4) + + iface.public.ipv4.addresses += [ + LinodeInterfacePublicIPv4AddressOptions(address="auto", primary=True) + ] + iface.public.ipv4.addresses[0].primary = False + + iface.public.ipv6.ranges[0].range = "/64" + + iface.save() + + iface.invalidate() + + assert len(iface.public.ipv4.addresses) == 2 + + address = iface.public.ipv4.addresses[0] + assert address.address == old_public_ipv4.addresses[0].address + assert not address.primary + + address = iface.public.ipv4.addresses[1] + assert ipaddress.ip_address(address.address) + assert address.primary + + assert len(iface.public.ipv6.ranges) == 1 + + range = iface.public.ipv6.ranges[0] + assert len(range.range) > 0 + assert ipaddress.ip_network(range.range) + + +def test_linode_interface_create_vpc(linode_interface_vpc): + iface, instance, vpc, subnet = linode_interface_vpc + + assert iface.id is not None + assert iface.linode_id == instance.id + + assert iface.created is not None + assert iface.updated is not None + + assert isinstance(iface.mac_address, str) + assert iface.version + + assert iface.default_route.ipv4 + assert iface.default_route.ipv6 + + assert iface.vpc.vpc_id == vpc.id + assert iface.vpc.subnet_id == subnet.id + + assert len(iface.vpc.ipv4.addresses[0].address) > 0 + assert iface.vpc.ipv4.addresses[0].primary + + assert iface.vpc.ipv4.addresses[0].nat_1_1_address is None + + assert 
iface.vpc.ipv4.ranges[0].range.split("/")[1] == "32" + + assert iface.default_route.ipv6 + ipv6 = iface.vpc.ipv6 + assert ipv6 and ipv6.is_public is False + + if ipv6.slaac: + assert ipv6.ranges == [] and len(ipv6.slaac) == 1 + assert ipv6.slaac[0].range and ipv6.slaac[0].address + elif ipv6.ranges: + assert ipv6.slaac == [] and len(ipv6.ranges) > 0 + + +def test_linode_interface_update_vpc(linode_interface_vpc): + iface, instance, vpc, subnet = linode_interface_vpc + + iface.vpc.subnet_id = 0 + + try: + iface.save() + except ApiError: + pass + else: + raise Exception("Expected error when updating subnet_id to 0") + + iface.invalidate() + + old_ipv4 = copy.deepcopy(iface.vpc.ipv4) + + iface.vpc.ipv4.addresses[0].address = "auto" + iface.vpc.ipv4.ranges += [ + LinodeInterfaceVPCIPv4RangeOptions( + range="/32", + ) + ] + + iface.save() + iface.invalidate() + + address = iface.vpc.ipv4.addresses[0] + assert ipaddress.ip_address(address.address) + + range = iface.vpc.ipv4.ranges[0] + assert ipaddress.ip_network(range.range) + assert range.range == old_ipv4.ranges[0].range + + range = iface.vpc.ipv4.ranges[1] + assert ipaddress.ip_network(range.range) + assert range.range != old_ipv4.ranges[0].range + + +def test_linode_interface_create_vlan( + linode_interface_vlan, +): + iface, instance = linode_interface_vlan + + assert iface.id is not None + assert iface.linode_id == instance.id + + assert iface.created is not None + assert iface.updated is not None + + assert isinstance(iface.mac_address, str) + assert iface.version + + assert not iface.default_route.ipv4 + assert not iface.default_route.ipv6 + + assert iface.vlan.vlan_label == "test-vlan" + assert iface.vlan.ipam_address == "10.0.0.5/32" + + +# NOTE: VLAN interface updates current aren't supported + + +def test_linode_interface_firewalls(e2e_test_firewall, linode_interface_public): + iface, instance, ipv6_range = linode_interface_public + + assert iface.id is not None + assert iface.linode_id == instance.id + + 
firewalls = iface.firewalls() + + firewall = firewalls[0] + assert firewall.id == e2e_test_firewall.id + assert firewall.label == e2e_test_firewall.label diff --git a/test/integration/models/linode/test_linode.py b/test/integration/models/linode/test_linode.py new file mode 100644 index 000000000..9f6194fa9 --- /dev/null +++ b/test/integration/models/linode/test_linode.py @@ -0,0 +1,1149 @@ +import ipaddress +import time +from test.integration.conftest import get_region +from test.integration.helpers import ( + get_test_label, + retry_sending_request, + send_request_when_resource_available, + wait_for_condition, +) + +import pytest + +from linode_api4.errors import ApiError +from linode_api4.objects import ( + Config, + ConfigInterface, + ConfigInterfaceIPv4, + Disk, + Instance, + InterfaceGeneration, + LinodeInterface, + Type, +) +from linode_api4.objects.linode import InstanceDiskEncryptionType, MigrationType + + +@pytest.fixture(scope="session") +def linode_with_volume_firewall(test_linode_client): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + + rules = { + "outbound": [], + "outbound_policy": "DROP", + "inbound": [], + "inbound_policy": "DROP", + } + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label + "_modlinode", + ) + + volume = client.volume_create( + label=label + "_volume", + region=linode_instance.region.id, + linode=linode_instance.id, + ) + + firewall = client.networking.firewall_create( + label=label + "_firewall", rules=rules, status="enabled" + ) + + firewall.device_create(int(linode_instance.id)) + + yield linode_instance + + firewall.delete() + + volume.detach() + # wait for volume detach, can't currently get the attached/unattached status via SDK + time.sleep(30) + + volume.delete() + + linode_instance.delete() + + +@pytest.fixture(scope="function") +def 
linode_for_legacy_interface_tests(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label(length=8) + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + interface_generation=InterfaceGeneration.LEGACY_CONFIG, + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture(scope="function") +def linode_and_vpc_for_legacy_interface_tests_offline( + test_linode_client, create_vpc_with_subnet, e2e_test_firewall +): + vpc, subnet = create_vpc_with_subnet + + label = get_test_label(length=8) + + instance, password = test_linode_client.linode.instance_create( + "g6-standard-1", + vpc.region, + booted=False, + image="linode/debian11", + label=label, + firewall=e2e_test_firewall, + interface_generation=InterfaceGeneration.LEGACY_CONFIG, + ) + + yield vpc, subnet, instance, password + + instance.delete() + + +@pytest.fixture(scope="session") +def linode_for_vpu_tests(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = "us-lax" + + label = get_test_label(length=8) + + linode_instance, password = client.linode.instance_create( + "g1-accelerated-netint-vpu-t1u1-s", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture +def linode_for_disk_tests(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/alpine3.19", + label=label + "_long_tests", + firewall=e2e_test_firewall, + ) + + # Provisioning time + wait_for_condition(10, 300, get_status, linode_instance, "running") + + 
send_request_when_resource_available(300, linode_instance.shutdown) + + wait_for_condition(10, 100, get_status, linode_instance, "offline") + + # Now it allocates 100% disk space hence need to clear some space for tests + send_request_when_resource_available(300, linode_instance.disks[1].delete) + + test_linode_client.polling.event_poller_create( + "linode", "disk_delete", entity_id=linode_instance.id + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture +def linode_with_block_storage_encryption(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Linodes", "Block Storage Encryption"}) + label = get_test_label() + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/alpine3.19", + label=label + "block-storage-encryption", + firewall=e2e_test_firewall, + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture +def create_linode_for_long_running_tests(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label + "_long_tests", + firewall=e2e_test_firewall, + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture(scope="function") +def linode_with_disk_encryption(test_linode_client, request): + client = test_linode_client + + target_region = get_region(client, {"Disk Encryption"}) + label = get_test_label(length=8) + + disk_encryption = request.param + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + target_region, + image="linode/ubuntu24.10", + label=label, + booted=False, + disk_encryption=disk_encryption, + ) + + yield linode_instance + + linode_instance.delete() + + +# Test helper +def get_status(linode: Instance, status: 
str): + return linode.status == status + + +def instance_type_condition(linode: Instance, type: str): + return type in str(linode.type) + + +def test_get_linode(test_linode_client, linode_with_volume_firewall): + linode = test_linode_client.load(Instance, linode_with_volume_firewall.id) + + assert linode.label == linode_with_volume_firewall.label + assert linode.id == linode_with_volume_firewall.id + + +def test_get_vpu(test_linode_client, linode_for_vpu_tests): + linode = test_linode_client.load(Instance, linode_for_vpu_tests.id) + + assert linode.label == linode_for_vpu_tests.label + assert hasattr(linode.specs, "accelerated_devices") + + +def test_linode_transfer(test_linode_client, linode_with_volume_firewall): + linode = test_linode_client.load(Instance, linode_with_volume_firewall.id) + + transfer = linode.transfer + + assert "used" in str(transfer) + assert "quota" in str(transfer) + assert "billable" in str(transfer) + + +def test_linode_rebuild(test_linode_client): + client = test_linode_client + + region = get_region(client, {"Disk Encryption"}) + + label = get_test_label() + "_rebuild" + + linode, password = client.linode.instance_create( + "g6-nanode-1", region, image="linode/debian12", label=label + ) + + wait_for_condition(10, 100, get_status, linode, "running") + + retry_sending_request( + 3, + linode.rebuild, + "linode/debian12", + disk_encryption=InstanceDiskEncryptionType.disabled, + ) + + wait_for_condition(10, 300, get_status, linode, "rebuilding") + + assert linode.status == "rebuilding" + assert linode.image.id == "linode/debian12" + + assert linode.disk_encryption == InstanceDiskEncryptionType.disabled + + wait_for_condition(10, 300, get_status, linode, "running") + + assert linode.status == "running" + + linode.delete() + + +def test_linode_available_backups(create_linode): + linode = create_linode + + enable_backup = linode.enable_backups() + backups = linode.backups + + assert enable_backup + assert "enabled" in str(backups) + assert 
"available" in str(backups) + assert "schedule" in str(backups) + assert "last_successful" in str(backups) + + +def test_update_linode(create_linode): + linode = create_linode + new_label = get_test_label() + "_updated" + linode.label = new_label + linode.group = "new_group" + updated = linode.save() + + assert updated + assert linode.label == new_label + + +def test_delete_linode(test_linode_client): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label + "_linode", + ) + + linode_instance.delete() + + +def test_linode_reboot(create_linode): + linode = create_linode + + wait_for_condition(3, 100, get_status, linode, "running") + + retry_sending_request(3, linode.reboot) + + wait_for_condition(3, 100, get_status, linode, "rebooting") + assert linode.status == "rebooting" + + wait_for_condition(3, 100, get_status, linode, "running") + assert linode.status == "running" + + +def test_linode_shutdown(create_linode): + linode = create_linode + + wait_for_condition(10, 100, get_status, linode, "running") + + retry_sending_request(3, linode.shutdown) + + wait_for_condition(10, 100, get_status, linode, "offline") + + assert linode.status == "offline" + + +def test_linode_boot(create_linode): + linode = create_linode + + if linode.status != "offline": + retry_sending_request(3, linode.shutdown) + wait_for_condition(3, 100, get_status, linode, "offline") + retry_sending_request(3, linode.boot) + else: + retry_sending_request(3, linode.boot) + + wait_for_condition(10, 100, get_status, linode, "running") + + assert linode.status == "running" + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_linode_resize(create_linode_for_long_running_tests): + linode = create_linode_for_long_running_tests + + wait_for_condition(10, 240, get_status, linode, "running") + + 
retry_sending_request(3, linode.resize, "g6-standard-6") + + wait_for_condition(10, 240, get_status, linode, "resizing") + + assert linode.status == "resizing" + + # Takes about 3-5 minute to resize, sometimes longer... + wait_for_condition(30, 600, get_status, linode, "running") + + assert linode.status == "running" + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_linode_resize_with_class( + test_linode_client, create_linode_for_long_running_tests +): + linode = create_linode_for_long_running_tests + ltype = Type(test_linode_client, "g6-standard-6") + + wait_for_condition(10, 100, get_status, linode, "running") + + time.sleep(5) + res = linode.resize(new_type=ltype) + + assert res + + wait_for_condition(10, 300, get_status, linode, "resizing") + + assert linode.status == "resizing" + + # Takes about 3-5 minute to resize, sometimes longer... + wait_for_condition(30, 600, get_status, linode, "running") + + assert linode.status == "running" + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_linode_resize_with_migration_type( + test_linode_client, + create_linode_for_long_running_tests, +): + linode = create_linode_for_long_running_tests + m_type = MigrationType.WARM + + wait_for_condition(10, 100, get_status, linode, "running") + + time.sleep(5) + + assert "g6-nanode-1" in str(linode.type) + assert linode.specs.disk == 25600 + + res = linode.resize(new_type="g6-standard-1", migration_type=m_type) + + if res: + # there is no resizing state in warm migration anymore hence wait for resizing and poll event + test_linode_client.polling.event_poller_create( + "linode", "linode_resize", entity_id=linode.id + ).wait_for_next_event_finished(interval=5, timeout=500) + + wait_for_condition( + 10, + 100, + get_status, + linode, + "running", + ) + else: + raise ApiError + + # reload resized linode + resized_linode = test_linode_client.load(Instance, linode.id) + + assert resized_linode.specs.disk == 51200 + + +def test_linode_boot_with_config(create_linode): + 
linode = create_linode + + wait_for_condition(10, 100, get_status, linode, "running") + retry_sending_request(3, linode.shutdown) + + wait_for_condition(30, 300, get_status, linode, "offline") + + config = linode.configs[0] + + retry_sending_request(3, linode.boot, config) + + wait_for_condition(10, 100, get_status, linode, "running") + + assert linode.status == "running" + + +def test_linode_firewalls(linode_with_volume_firewall): + linode = linode_with_volume_firewall + + firewalls = linode.firewalls() + + assert len(firewalls) > 0 + assert "firewall" in firewalls[0].label + + +def test_linode_apply_firewalls(linode_with_volume_firewall): + linode = linode_with_volume_firewall + + result = linode.apply_firewalls() + + assert result + + +def test_linode_volumes(linode_with_volume_firewall): + linode = linode_with_volume_firewall + + volumes = linode.volumes() + + assert len(volumes) > 0 + assert "_volume" in volumes[0].label + + +@pytest.mark.parametrize( + "linode_with_disk_encryption", ["disabled"], indirect=True +) +def test_linode_with_disk_encryption_disabled(linode_with_disk_encryption): + linode = linode_with_disk_encryption + + assert linode.disk_encryption == InstanceDiskEncryptionType.disabled + assert ( + linode.disks[0].disk_encryption == InstanceDiskEncryptionType.disabled + ) + + +def test_linode_with_block_storage_encryption( + linode_with_block_storage_encryption, +): + linode = linode_with_block_storage_encryption + assert "Block Storage Encryption" in linode.capabilities + + +def wait_for_disk_status(disk: Disk, timeout): + start_time = time.time() + while True: + try: + if disk.status == "ready": + return disk.status + except ApiError: + if time.time() - start_time > timeout: + raise TimeoutError("Wait for condition timeout error") + + +def test_disk_resize_and_duplicate(test_linode_client, linode_for_disk_tests): + linode = linode_for_disk_tests + + disk = linode.disks[0] + + send_request_when_resource_available(300, disk.resize, 5000) + + 
time.sleep(100) + + disk = test_linode_client.load(Disk, linode.disks[0].id, linode.id) + + assert disk.size == 5000 + + dup_disk = disk.duplicate() + + time.sleep(40) + + wait_for_disk_status(dup_disk, 120) + assert dup_disk.linode_id == linode.id + + +def test_linode_create_disk(test_linode_client, linode_for_disk_tests): + linode = test_linode_client.load(Instance, linode_for_disk_tests.id) + + disk = send_request_when_resource_available( + 300, + linode.disk_create, + size=500, + ) + + wait_for_disk_status(disk, 120) + + assert disk.linode_id == linode.id + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_linode_instance_password(create_linode_for_pass_reset): + linode = create_linode_for_pass_reset[0] + password = create_linode_for_pass_reset[1] + + wait_for_condition(10, 100, get_status, linode, "running") + + retry_sending_request(3, linode.shutdown) + + wait_for_condition(10, 200, get_status, linode, "offline") + + linode.reset_instance_root_password(root_password=password) + + linode.boot() + + wait_for_condition(10, 100, get_status, linode, "running") + + assert linode.status == "running" + + +def test_linode_ips(create_linode): + linode = create_linode + + ips = linode.ips + + assert ips.ipv4.public[0].address == linode.ipv4[0] + + +def test_linode_initate_migration(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + "_migration" + + linode, _ = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + # Says it could take up to ~6 hrs for migration to fully complete + send_request_when_resource_available( + 300, + linode.initiate_migration, + region="us-central", + migration_type=MigrationType.COLD, + ) + + def get_linode_status(): + return linode.status == "offline" + + # To verify that Linode's status changed before deletion (during 
migration status is set to 'offline') + wait_for_condition(5, 120, get_linode_status) + + res = linode.delete() + + assert res + + +def test_linode_upgrade_interfaces( + linode_for_legacy_interface_tests, + linode_and_vpc_for_legacy_interface_tests_offline, +): + vpc, subnet, linode, _ = linode_and_vpc_for_legacy_interface_tests_offline + config = linode.configs[0] + + new_interfaces = [ + {"purpose": "public"}, + ConfigInterface( + purpose="vlan", label="cool-vlan", ipam_address="10.0.0.4/32" + ), + ConfigInterface( + purpose="vpc", + subnet_id=subnet.id, + primary=True, + ipv4=ConfigInterfaceIPv4(vpc="10.0.0.2", nat_1_1="any"), + ip_ranges=["10.0.0.5/32"], + ), + ] + config.interfaces = new_interfaces + + config.save() + + def __assert_base(iface: LinodeInterface): + assert iface.id is not None + assert iface.created is not None + assert iface.updated is not None + assert iface.version is not None + + assert len(iface.mac_address) > 0 + + def __assert_public(iface: LinodeInterface): + __assert_base(iface) + + assert not iface.default_route.ipv4 + assert not iface.default_route.ipv6 + + assert len(iface.public.ipv4.addresses) == 0 + assert len(iface.public.ipv4.shared) == 0 + + assert len(iface.public.ipv6.slaac) == 1 + assert iface.public.ipv6.slaac[0].address == linode.ipv6.split("/")[0] + + assert len(iface.public.ipv6.ranges) == 0 + assert len(iface.public.ipv6.shared) == 0 + + def __assert_vpc(iface: LinodeInterface): + __assert_base(iface) + + assert iface.default_route.ipv4 + assert iface.default_route.ipv6 + + assert iface.vpc.vpc_id == vpc.id + assert iface.vpc.subnet_id == subnet.id + + assert len(iface.vpc.ipv4.addresses) == 1 + assert iface.vpc.ipv4.addresses[0].address == "10.0.0.2" + assert iface.vpc.ipv4.addresses[0].primary + assert iface.vpc.ipv4.addresses[0].nat_1_1_address is not None + + assert len(iface.vpc.ipv4.ranges) == 1 + assert iface.vpc.ipv4.ranges[0].range == "10.0.0.5/32" + + assert len(iface.vpc.ipv6.slaac) == 1 + + 
ipaddress.IPv6Network(iface.vpc.ipv6.slaac[0].range) + ipaddress.IPv6Address(iface.vpc.ipv6.slaac[0].address) + + assert len(iface.vpc.ipv6.ranges) == 0 + assert iface.vpc.ipv6.is_public is False + + def __assert_vlan(iface: LinodeInterface): + __assert_base(iface) + + assert not iface.default_route.ipv4 + assert not iface.default_route.ipv6 + + assert iface.vlan.vlan_label == "cool-vlan" + assert iface.vlan.ipam_address == "10.0.0.4/32" + + result = linode.upgrade_interfaces(dry_run=True) + + assert result.dry_run + assert result.config_id == config.id + + __assert_public(result.interfaces[0]) + __assert_vlan(result.interfaces[1]) + __assert_vpc(result.interfaces[2]) + + result = linode.upgrade_interfaces(config=config) + + assert not result.dry_run + assert result.config_id == config.id + + __assert_public(linode.linode_interfaces[0]) + __assert_vlan(linode.linode_interfaces[1]) + __assert_vpc(linode.linode_interfaces[2]) + + +def test_linode_interfaces_settings(linode_with_linode_interfaces): + linode = linode_with_linode_interfaces + settings = linode.interfaces_settings + + assert settings.network_helper is not None + assert ( + settings.default_route.ipv4_interface_id + == linode.linode_interfaces[0].id + ) + assert settings.default_route.ipv4_eligible_interface_ids == [ + linode.linode_interfaces[0].id, + linode.linode_interfaces[1].id, + ] + + assert ( + settings.default_route.ipv6_interface_id + == linode.linode_interfaces[0].id + ) + assert settings.default_route.ipv6_eligible_interface_ids == [ + linode.linode_interfaces[0].id, + linode.linode_interfaces[1].id, + ] + + # Arbitrary updates + settings.network_helper = True + settings.default_route.ipv4_interface_id = linode.linode_interfaces[1].id + + settings.save() + settings.invalidate() + + # Assert updates + assert settings.network_helper is not None + assert ( + settings.default_route.ipv4_interface_id + == linode.linode_interfaces[1].id + ) + + +def test_config_update_interfaces(create_linode): + 
linode = create_linode + config = linode.configs[0] + + new_interfaces = [ + {"purpose": "public"}, + ConfigInterface( + purpose="vlan", label="cool-vlan", ipam_address="10.0.0.4/32" + ), + ] + config.interfaces = new_interfaces + + res = config.save() + config.invalidate() + + assert res + assert config.interfaces[0].purpose == "public" + assert config.interfaces[1].purpose == "vlan" + assert config.interfaces[1].label == "cool-vlan" + assert config.interfaces[1].ipam_address == "10.0.0.4/32" + + +def test_get_config(test_linode_client, create_linode): + linode = create_linode + + config = test_linode_client.load(Config, linode.configs[0].id, linode.id) + + assert config.id == linode.configs[0].id + + +def test_get_linode_types(test_linode_client): + types = test_linode_client.linode.types() + + ids = [i.id for i in types] + + assert len(types) > 0 + assert "g6-nanode-1" in ids + + for linode_type in types: + assert hasattr(linode_type, "accelerated_devices") + + +def test_get_linode_types_overrides(test_linode_client): + types = test_linode_client.linode.types() + + target_types = [ + v + for v in types + if len(v.region_prices) > 0 and v.region_prices[0].hourly > 0 + ] + + assert len(target_types) > 0 + + for linode_type in target_types: + assert linode_type.region_prices[0].hourly >= 0 + assert linode_type.region_prices[0].monthly >= 0 + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_save_linode_noforce(test_linode_client, create_linode): + linode = create_linode + old_label = linode.label + linode.label = old_label + "updated_no_force" + linode.save(force=False) + + linode = test_linode_client.load(Instance, linode.id) + + assert old_label != linode.label + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_save_linode_force(test_linode_client, create_linode): + linode = create_linode + old_label = linode.label + linode.label = old_label + "updated_force" + linode.save(force=True) + + linode = test_linode_client.load(Instance, linode.id) + + 
assert old_label != linode.label + + +class TestNetworkInterface: + def test_list(self, linode_for_legacy_interface_tests): + linode = linode_for_legacy_interface_tests + + config: Config = linode.configs[0] + + config.interface_create_public( + primary=True, + ) + + label = str(time.time_ns()) + "vlabel" + + config.interface_create_vlan(label=label, ipam_address="10.0.0.3/32") + + interface = config.network_interfaces + + assert interface[0].purpose == "public" + assert interface[0].primary + assert interface[1].purpose == "vlan" + assert interface[1].label == label + assert interface[1].ipam_address == "10.0.0.3/32" + + def test_create_public(self, linode_for_legacy_interface_tests): + linode = linode_for_legacy_interface_tests + + config: Config = linode.configs[0] + + config.interfaces = [] + config.save() + + interface = config.interface_create_public( + primary=True, + ) + + config.invalidate() + + assert interface.id == config.interfaces[0].id + assert interface.purpose == "public" + assert interface.primary + + def test_create_vlan(self, linode_for_legacy_interface_tests): + linode = linode_for_legacy_interface_tests + + config: Config = linode.configs[0] + + config.interfaces = [] + config.save() + + interface = config.interface_create_vlan( + label="testvlan", ipam_address="10.0.0.2/32" + ) + + config.invalidate() + + assert interface.id == config.interfaces[0].id + assert interface.purpose == "vlan" + assert interface.label == "testvlan" + assert interface.ipam_address == "10.0.0.2/32" + + def test_create_vpu(self, test_linode_client, linode_for_vpu_tests): + assert hasattr(linode_for_vpu_tests.specs, "accelerated_devices") + + def test_create_vpc( + self, + test_linode_client, + linode_and_vpc_for_legacy_interface_tests_offline, + ): + vpc, subnet, linode, _ = ( + linode_and_vpc_for_legacy_interface_tests_offline + ) + + config: Config = linode.configs[0] + + config.interfaces = [] + config.save() + + interface = config.interface_create_vpc( + 
subnet=subnet, + primary=True, + ipv4=ConfigInterfaceIPv4(vpc="10.0.0.3", nat_1_1="any"), + ip_ranges=["10.0.0.5/32"], + ) + + config.invalidate() + + assert interface.id == config.interfaces[0].id + assert interface.subnet.id == subnet.id + assert interface.purpose == "vpc" + assert interface.ipv4.vpc == "10.0.0.3" + assert interface.ipv4.nat_1_1 == linode.ipv4[0] + assert interface.ip_ranges == ["10.0.0.5/32"] + + vpc_ip = linode.ips.ipv4.vpc[0] + vpc_range_ip = linode.ips.ipv4.vpc[1] + + assert vpc_ip.nat_1_1 == linode.ips.ipv4.public[0].address + assert vpc_ip.address_range is None + assert vpc_ip.vpc_id == vpc.id + assert vpc_ip.subnet_id == subnet.id + assert vpc_ip.config_id == config.id + assert vpc_ip.interface_id == interface.id + assert not vpc_ip.active + + assert vpc_range_ip.address_range == "10.0.0.5/32" + assert not vpc_range_ip.active + + assert isinstance(vpc.ipv6, list) + assert len(vpc.ipv6) > 0 + assert isinstance(vpc.ipv6[0].range, str) + assert ":" in vpc.ipv6[0].range + + # TODO:: Add `VPCIPAddress.filters.linode_id == linode.id` filter back + + # Attempt to resolve the IP from /vpcs/ips + all_vpc_ips = test_linode_client.vpcs.ips() + matched_ip = next( + ( + ip + for ip in all_vpc_ips + if ip.address == vpc_ip.address + and ip.vpc_id == vpc_ip.vpc_id + and ip.linode_id == vpc_ip.linode_id + ), + None, + ) + + assert ( + matched_ip is not None + ), f"Expected VPC IP {vpc_ip.address} not found in /vpcs/ips" + assert matched_ip.dict == vpc_ip.dict + + # Test getting the ips under this specific VPC + vpc_ips = vpc.ips + + assert len(vpc_ips) > 0 + assert vpc_ips[0].vpc_id == vpc.id + assert vpc_ips[0].linode_id == linode.id + assert vpc_ips[0].nat_1_1 == linode.ips.ipv4.public[0].address + + # Validate VPC IPv6 IPs from /vpcs/ips + all_vpc_ipv6 = test_linode_client.get("/vpcs/ipv6s")["data"] + + # Find matching VPC IPv6 entry + matched_ipv6 = next( + ( + ip + for ip in all_vpc_ipv6 + if ip["vpc_id"] == vpc.id + and ip["linode_id"] == linode.id 
+ and ip["interface_id"] == interface.id + and ip["subnet_id"] == subnet.id + ), + None, + ) + + assert ( + matched_ipv6 + ), f"No VPC IPv6 found for Linode {linode.id} in VPC {vpc.id}" + + assert matched_ipv6["ipv6_range"].count(":") >= 2 + assert not matched_ipv6["ipv6_is_public"] + + ipv6_addresses = matched_ipv6.get("ipv6_addresses", []) + assert ( + isinstance(ipv6_addresses, list) and ipv6_addresses + ), "No IPv6 addresses found" + + slaac = ipv6_addresses[0] + assert ( + isinstance(slaac.get("slaac_address"), str) + and ":" in slaac["slaac_address"] + ) + + def test_update_vpc( + self, + linode_and_vpc_for_legacy_interface_tests_offline, + ): + vpc, subnet, linode, _ = ( + linode_and_vpc_for_legacy_interface_tests_offline + ) + + config: Config = linode.configs[0] + + config.interfaces = [] + config.save() + + interface = config.interface_create_vpc( + subnet=subnet, + primary=True, + ip_ranges=["10.0.0.8/32"], + ) + + interface.primary = False + interface.ip_ranges = ["10.0.0.9/32"] + interface.ipv4.vpc = "10.0.0.3" + interface.ipv4.nat_1_1 = "any" + + interface.save() + interface.invalidate() + config.invalidate() + + assert interface.id == config.interfaces[0].id + assert interface.subnet.id == subnet.id + assert interface.purpose == "vpc" + assert interface.ipv4.vpc == "10.0.0.3" + assert interface.ipv4.nat_1_1 == linode.ipv4[0] + assert interface.ip_ranges == ["10.0.0.9/32"] + + def test_reorder(self, linode_for_legacy_interface_tests): + linode = linode_for_legacy_interface_tests + + config: Config = linode.configs[0] + + pub_interface = config.interface_create_public( + primary=True, + ) + + label = str(time.time_ns()) + "vlabel" + vlan_interface = config.interface_create_vlan( + label=label, ipam_address="10.0.0.3/32" + ) + + send_request_when_resource_available(300, linode.shutdown) + + interfaces = config.network_interfaces + interfaces.reverse() + + send_request_when_resource_available( + 300, config.interface_reorder, interfaces + ) + 
config.invalidate() + + assert [v.id for v in config.interfaces[:2]] == [ + vlan_interface.id, + pub_interface.id, + ] + + def test_delete_interface_containing_vpc( + self, create_vpc_with_subnet_and_linode + ): + vpc, subnet, linode, _ = create_vpc_with_subnet_and_linode + + config: Config = linode.configs[0] + + config.interfaces = [] + + # must power off linode before saving + send_request_when_resource_available(300, linode.shutdown) + + send_request_when_resource_available(60, config.save) + + interface = config.interface_create_vpc( + subnet=subnet, + primary=True, + ip_ranges=["10.0.0.8/32"], + ) + + result = interface.delete() + + # returns true when delete successful + assert result + + +def test_create_linode_with_maintenance_policy(test_linode_client): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label() + + policies = client.maintenance.maintenance_policies() + assert policies, "No maintenance policies returned from API" + + non_default_policy = next((p for p in policies if not p.is_default), None) + assert non_default_policy, "No non-default maintenance policy available" + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label + "_with_policy", + maintenance_policy=non_default_policy.slug, + ) + + assert linode_instance.id is not None + assert linode_instance.label.startswith(label) + assert linode_instance.maintenance_policy == non_default_policy.slug + + linode_instance.delete() + + +def test_update_linode_maintenance_policy(create_linode, test_linode_client): + client = test_linode_client + linode = create_linode + + policies = client.maintenance.maintenance_policies() + assert policies, "No maintenance policies returned from API" + + non_default_policy = next((p for p in policies if not p.is_default), None) + assert non_default_policy, "No non-default maintenance policy found" + + 
linode.maintenance_policy_id = non_default_policy.slug + result = linode.save() + + linode.invalidate() + assert result + assert linode.maintenance_policy_id == non_default_policy.slug diff --git a/test/integration/models/lke/test_lke.py b/test/integration/models/lke/test_lke.py new file mode 100644 index 000000000..96ab1d3cc --- /dev/null +++ b/test/integration/models/lke/test_lke.py @@ -0,0 +1,520 @@ +import base64 +import re +from test.integration.conftest import get_region +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, + wait_for_condition, +) +from typing import Any, Dict + +import pytest + +from linode_api4 import ( + LKEClusterControlPlaneACLAddressesOptions, + LKEClusterControlPlaneACLOptions, + LKEClusterControlPlaneOptions, + TieredKubeVersion, +) +from linode_api4.common import RegionPrice +from linode_api4.errors import ApiError +from linode_api4.objects import ( + LKECluster, + LKENodePool, + LKENodePoolTaint, + LKEType, +) +from linode_api4.objects.linode import InstanceDiskEncryptionType + + +@pytest.fixture(scope="session") +def lke_cluster(test_linode_client): + node_type = test_linode_client.linode.types()[1] # g6-standard-1 + version = test_linode_client.lke.versions()[0] + + region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) + + node_pools = test_linode_client.lke.node_pool(node_type, 3) + label = get_test_label() + "_cluster" + + cluster = test_linode_client.lke.cluster_create( + region, label, version, node_pools + ) + + yield cluster + + cluster.delete() + + +@pytest.fixture(scope="function") +def lke_cluster_with_acl(test_linode_client): + node_type = test_linode_client.linode.types()[1] # g6-standard-1 + version = test_linode_client.lke.versions()[0] + region = get_region(test_linode_client, {"Kubernetes"}) + node_pools = test_linode_client.lke.node_pool(node_type, 1) + label = get_test_label() + "_cluster" + + cluster = test_linode_client.lke.cluster_create( + 
region, + label, + version, + node_pools, + control_plane=LKEClusterControlPlaneOptions( + acl=LKEClusterControlPlaneACLOptions( + enabled=True, + addresses=LKEClusterControlPlaneACLAddressesOptions( + ipv4=["10.0.0.1/32"], ipv6=["1234::5678"] + ), + ) + ), + ) + + yield cluster + + cluster.delete() + + +# NOTE: This needs to be function-scoped because it is mutated in a test below. +@pytest.fixture(scope="function") +def lke_cluster_with_labels_and_taints(test_linode_client): + node_type = test_linode_client.linode.types()[1] # g6-standard-1 + version = test_linode_client.lke.versions()[0] + + region = get_region(test_linode_client, {"Kubernetes"}) + + node_pools = test_linode_client.lke.node_pool( + node_type, + 3, + labels={ + "foo.example.com/test": "bar", + "foo.example.com/test2": "test", + }, + taints=[ + LKENodePoolTaint( + key="foo.example.com/test", value="bar", effect="NoSchedule" + ), + { + "key": "foo.example.com/test2", + "value": "cool", + "effect": "NoExecute", + }, + ], + ) + label = get_test_label() + "_cluster" + + cluster = test_linode_client.lke.cluster_create( + region, label, version, node_pools + ) + + yield cluster + + cluster.delete() + + +@pytest.fixture(scope="session") +def lke_cluster_with_apl(test_linode_client): + version = test_linode_client.lke.versions()[0] + + region = get_region(test_linode_client, {"Kubernetes", "Disk Encryption"}) + + # NOTE: g6-dedicated-4 is the minimum APL-compatible Linode type + node_pools = test_linode_client.lke.node_pool("g6-dedicated-4", 3) + label = get_test_label() + "_cluster" + + cluster = test_linode_client.lke.cluster_create( + region, + label, + version, + node_pools, + control_plane=LKEClusterControlPlaneOptions( + high_availability=True, + ), + apl_enabled=True, + ) + + yield cluster + + cluster.delete() + + +@pytest.fixture(scope="session") +def lke_cluster_enterprise(e2e_test_firewall, test_linode_client): + # We use the oldest version here so we can test upgrades + version = sorted( + v.id 
for v in test_linode_client.lke.tier("enterprise").versions() + )[0] + + region = get_region( + test_linode_client, {"Kubernetes Enterprise", "Disk Encryption"} + ) + + node_pools = test_linode_client.lke.node_pool( + "g6-dedicated-2", + 3, + k8s_version=version, + update_strategy="rolling_update", + firewall_id=e2e_test_firewall.id, + ) + label = get_test_label() + "_cluster" + + cluster = test_linode_client.lke.cluster_create( + region, + label, + version, + node_pools, + tier="enterprise", + ) + + yield cluster + + cluster.delete() + + +def get_cluster_status(cluster: LKECluster, status: str): + return cluster._raw_json["status"] == status + + +def get_node_status(cluster: LKECluster, status: str): + node = cluster.pools[0].nodes[0] + return node.status == status + + +@pytest.mark.smoke +def test_get_lke_clusters(test_linode_client, lke_cluster): + cluster = test_linode_client.load(LKECluster, lke_cluster.id) + + assert cluster._raw_json == lke_cluster._raw_json + + +@pytest.mark.smoke +def test_get_lke_pool(test_linode_client, lke_cluster): + cluster = lke_cluster + + wait_for_condition( + 10, + 500, + get_node_status, + cluster, + "ready", + ) + + pool = test_linode_client.load(LKENodePool, cluster.pools[0].id, cluster.id) + + def _to_comparable(p: LKENodePool) -> Dict[str, Any]: + return {k: v for k, v in p._raw_json.items() if k not in {"nodes"}} + + assert _to_comparable(cluster.pools[0]) == _to_comparable(pool) + + assert pool.disk_encryption in ( + InstanceDiskEncryptionType.enabled, + InstanceDiskEncryptionType.disabled, + ) + + +def test_node_pool_create_with_disk_encryption(test_linode_client, lke_cluster): + node_type = test_linode_client.linode.types()[1] + + pool = lke_cluster.node_pool_create( + node_type, + 1, + disk_encryption=InstanceDiskEncryptionType.enabled, + ) + + try: + assert pool.disk_encryption == InstanceDiskEncryptionType.enabled + finally: + pool.delete() + + +def test_cluster_dashboard_url_view(lke_cluster): + cluster = lke_cluster 
+ + url = send_request_when_resource_available( + 300, cluster.cluster_dashboard_url_view + ) + + assert re.search("https://+", url) + + +def test_get_and_delete_kubeconfig(lke_cluster): + cluster = lke_cluster + + kubeconfig_encoded = cluster.kubeconfig + + kubeconfig_decoded = base64.b64decode(kubeconfig_encoded).decode("utf-8") + + assert "kind: Config" in kubeconfig_decoded + + assert "apiVersion:" in kubeconfig_decoded + + res = send_request_when_resource_available(300, cluster.kubeconfig_delete) + + assert res is None + + +def test_lke_node_view(lke_cluster): + cluster = lke_cluster + node_id = cluster.pools[0].nodes[0].id + + node = cluster.node_view(node_id) + + assert node.status in ("ready", "not_ready") + assert node.id == node_id + assert node.instance_id + + +def test_lke_node_delete(lke_cluster): + cluster = lke_cluster + node_id = cluster.pools[0].nodes[0].id + + cluster.node_delete(node_id) + + with pytest.raises(ApiError) as err: + cluster.node_view(node_id) + assert "Not found" in str(err.json) + + +def test_lke_node_recycle(test_linode_client, lke_cluster): + cluster = test_linode_client.load(LKECluster, lke_cluster.id) + + node_id = cluster.pools[0].nodes[0].id + + send_request_when_resource_available(300, cluster.node_recycle, node_id) + + wait_for_condition(10, 300, get_node_status, cluster, "not_ready") + + node = cluster.pools[0].nodes[0] + assert node.status == "not_ready" + + # wait for provisioning + wait_for_condition( + 10, + 500, + get_node_status, + test_linode_client.load(LKECluster, lke_cluster.id), + "ready", + ) + + # Reload cluster + cluster = test_linode_client.load(LKECluster, lke_cluster.id) + + node = cluster.pools[0].nodes[0] + + assert node.status == "ready" + + +def test_lke_cluster_nodes_recycle(test_linode_client, lke_cluster): + cluster = lke_cluster + + send_request_when_resource_available(300, cluster.cluster_nodes_recycle) + + wait_for_condition( + 5, + 300, + get_node_status, + test_linode_client.load(LKECluster, 
cluster.id), + "not_ready", + ) + + node_pool = test_linode_client.load( + LKENodePool, cluster.pools[0].id, cluster.id + ) + node = node_pool.nodes[0] + assert node.status == "not_ready" + + +def test_service_token_delete(lke_cluster): + cluster = lke_cluster + + res = cluster.service_token_delete() + + assert res is None + + +def test_lke_cluster_acl(lke_cluster_with_acl): + cluster = lke_cluster_with_acl + + assert cluster.control_plane_acl.enabled + assert cluster.control_plane_acl.addresses.ipv4 == ["10.0.0.1/32"] + assert cluster.control_plane_acl.addresses.ipv6 == ["1234::5678/128"] + + acl = cluster.control_plane_acl_update( + LKEClusterControlPlaneACLOptions( + enabled=True, + addresses=LKEClusterControlPlaneACLAddressesOptions( + ipv4=["10.0.0.2/32"] + ), + ) + ) + + assert acl == cluster.control_plane_acl + assert acl.addresses.ipv4 == ["10.0.0.2/32"] + + +def test_lke_cluster_update_acl_null_addresses(lke_cluster_with_acl): + cluster = lke_cluster_with_acl + + # Addresses should not be included in the request if it's null, + # else an error will be returned by the API. 
+ # See: TPT-3489 + acl = cluster.control_plane_acl_update( + {"enabled": False, "addresses": None} + ) + + assert acl == cluster.control_plane_acl + assert acl.addresses.ipv4 == [] + + +def test_lke_cluster_disable_acl(lke_cluster_with_acl): + cluster = lke_cluster_with_acl + + assert cluster.control_plane_acl.enabled + + acl = cluster.control_plane_acl_update( + LKEClusterControlPlaneACLOptions( + enabled=False, + ) + ) + + assert acl.enabled is False + assert acl == cluster.control_plane_acl + assert acl.addresses.ipv4 == [] + + cluster.control_plane_acl_delete() + + assert not cluster.control_plane_acl.enabled + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_lke_cluster_labels_and_taints(lke_cluster_with_labels_and_taints): + pool = lke_cluster_with_labels_and_taints.pools[0] + + assert vars(pool.labels) == { + "foo.example.com/test": "bar", + "foo.example.com/test2": "test", + } + + assert ( + LKENodePoolTaint( + key="foo.example.com/test", value="bar", effect="NoSchedule" + ) + in pool.taints + ) + + assert ( + LKENodePoolTaint( + key="foo.example.com/test2", value="cool", effect="NoExecute" + ) + in pool.taints + ) + + updated_labels = { + "foo.example.com/test": "bar", + "foo.example.com/test2": "cool", + } + + updated_taints = [ + LKENodePoolTaint( + key="foo.example.com/test", value="bar", effect="NoSchedule" + ), + { + "key": "foo.example.com/test2", + "value": "cool", + "effect": "NoExecute", + }, + ] + + pool.labels = updated_labels + pool.taints = updated_taints + + pool.save() + + # Invalidate the pool so we can assert on the refreshed values + pool.invalidate() + + assert vars(pool.labels) == updated_labels + assert updated_taints[0] in pool.taints + assert LKENodePoolTaint.from_json(updated_taints[1]) in pool.taints + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_lke_cluster_with_apl(lke_cluster_with_apl): + assert lke_cluster_with_apl.apl_enabled == True + assert ( + lke_cluster_with_apl.apl_console_url + == 
f"https://console.lke{lke_cluster_with_apl.id}.akamai-apl.net" + ) + assert ( + lke_cluster_with_apl.apl_health_check_url + == f"https://auth.lke{lke_cluster_with_apl.id}.akamai-apl.net/ready" + ) + + +def test_lke_cluster_enterprise( + e2e_test_firewall, + test_linode_client, + lke_cluster_enterprise, +): + lke_cluster_enterprise.invalidate() + assert lke_cluster_enterprise.tier == "enterprise" + + pool = lke_cluster_enterprise.pools[0] + assert str(pool.k8s_version) == lke_cluster_enterprise.k8s_version.id + assert pool.update_strategy == "rolling_update" + assert pool.firewall_id == e2e_test_firewall.id + + target_version = sorted( + v.id for v in test_linode_client.lke.tier("enterprise").versions() + )[0] + pool.update_strategy = "on_recycle" + pool.k8s_version = target_version + + pool.save() + + pool.invalidate() + + assert pool.k8s_version == target_version + assert pool.update_strategy == "on_recycle" + + +def test_lke_tiered_versions(test_linode_client): + def __assert_version(tier: str, version: TieredKubeVersion): + assert version.tier == tier + assert len(version.id) > 0 + + standard_versions = test_linode_client.lke.tier("standard").versions() + assert len(standard_versions) > 0 + + standard_version = standard_versions[0] + __assert_version("standard", standard_version) + + standard_version.invalidate() + __assert_version("standard", standard_version) + + enterprise_versions = test_linode_client.lke.tier("enterprise").versions() + assert len(enterprise_versions) > 0 + + enterprise_version = enterprise_versions[0] + __assert_version("enterprise", enterprise_version) + + enterprise_version.invalidate() + __assert_version("enterprise", enterprise_version) + + +def test_lke_types(test_linode_client): + types = test_linode_client.lke.types() + + if len(types) > 0: + for lke_type in types: + assert type(lke_type) is LKEType + assert lke_type.price.monthly is None or ( + isinstance(lke_type.price.monthly, (float, int)) + and lke_type.price.monthly >= 0 + ) + 
if len(lke_type.region_prices) > 0: + region_price = lke_type.region_prices[0] + assert type(region_price) is RegionPrice + assert lke_type.price.monthly is None or ( + isinstance(lke_type.price.monthly, (float, int)) + and lke_type.price.monthly >= 0 + ) diff --git a/test/integration/models/lock/__init__.py b/test/integration/models/lock/__init__.py new file mode 100644 index 000000000..1e07a34ee --- /dev/null +++ b/test/integration/models/lock/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left empty to make the directory a Python package. diff --git a/test/integration/models/lock/test_lock.py b/test/integration/models/lock/test_lock.py new file mode 100644 index 000000000..f2139a176 --- /dev/null +++ b/test/integration/models/lock/test_lock.py @@ -0,0 +1,151 @@ +from test.integration.conftest import get_region +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, +) + +import pytest + +from linode_api4.objects import Lock, LockType + + +@pytest.fixture(scope="function") +def linode_for_lock(test_linode_client, e2e_test_firewall): + """ + Create a Linode instance for testing locks. + """ + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label(length=8) + + linode_instance, _ = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + yield linode_instance + + # Clean up any locks on the Linode before deleting it + locks = client.locks() + for lock in locks: + if ( + lock.entity.id == linode_instance.id + and lock.entity.type == "linode" + ): + lock.delete() + + send_request_when_resource_available( + timeout=100, func=linode_instance.delete + ) + + +@pytest.fixture(scope="function") +def test_lock(test_linode_client, linode_for_lock): + """ + Create a lock for testing. 
+ """ + lock = test_linode_client.locks.create( + entity_type="linode", + entity_id=linode_for_lock.id, + lock_type=LockType.cannot_delete, + ) + + yield lock + + # Clean up lock if it still exists + try: + lock.delete() + except Exception: + pass # Lock may have been deleted by the test + + +@pytest.mark.smoke +def test_get_lock(test_linode_client, test_lock): + """ + Test that a lock can be retrieved by ID. + """ + lock = test_linode_client.load(Lock, test_lock.id) + + assert lock.id == test_lock.id + assert lock.lock_type == "cannot_delete" + assert lock.entity is not None + assert lock.entity.type == "linode" + + +def test_list_locks(test_linode_client, test_lock): + """ + Test that locks can be listed. + """ + locks = test_linode_client.locks() + + assert len(locks) > 0 + + # Verify our test lock is in the list + lock_ids = [lock.id for lock in locks] + assert test_lock.id in lock_ids + + +def test_create_lock_cannot_delete(test_linode_client, linode_for_lock): + """ + Test creating a cannot_delete lock. + """ + lock = test_linode_client.locks.create( + entity_type="linode", + entity_id=linode_for_lock.id, + lock_type=LockType.cannot_delete, + ) + + assert lock.id is not None + assert lock.lock_type == "cannot_delete" + assert lock.entity.id == linode_for_lock.id + assert lock.entity.type == "linode" + assert lock.entity.label == linode_for_lock.label + + # Clean up + lock.delete() + + +def test_create_lock_cannot_delete_with_subresources( + test_linode_client, linode_for_lock +): + """ + Test creating a cannot_delete_with_subresources lock. 
+ """ + lock = test_linode_client.locks.create( + entity_type="linode", + entity_id=linode_for_lock.id, + lock_type=LockType.cannot_delete_with_subresources, + ) + + assert lock.id is not None + assert lock.lock_type == "cannot_delete_with_subresources" + assert lock.entity.id == linode_for_lock.id + assert lock.entity.type == "linode" + + # Clean up + lock.delete() + + +def test_delete_lock(test_linode_client, linode_for_lock): + """ + Test that a lock can be deleted using the Lock object's delete method. + """ + # Create a lock + lock = test_linode_client.locks.create( + entity_type="linode", + entity_id=linode_for_lock.id, + lock_type=LockType.cannot_delete, + ) + + lock_id = lock.id + + # Delete the lock using the object method + lock.delete() + + # Verify the lock no longer exists + locks = test_linode_client.locks() + lock_ids = [lk.id for lk in locks] + assert lock_id not in lock_ids diff --git a/test/integration/models/longview/test_longview.py b/test/integration/models/longview/test_longview.py new file mode 100644 index 000000000..6a6855460 --- /dev/null +++ b/test/integration/models/longview/test_longview.py @@ -0,0 +1,76 @@ +import re +import time +from test.integration.helpers import get_test_label + +import pytest + +from linode_api4.objects import ( + ApiError, + LongviewClient, + LongviewPlan, + LongviewSubscription, +) + + +@pytest.mark.smoke +def test_get_longview_client(test_linode_client, test_longview_client): + longview = test_linode_client.load(LongviewClient, test_longview_client.id) + + assert longview.id == test_longview_client.id + + +def test_update_longview_label(test_linode_client, test_longview_client): + longview = test_linode_client.load(LongviewClient, test_longview_client.id) + old_label = longview.label + + label = get_test_label(10) + + longview.label = label + + longview.save() + + assert longview.label != old_label + + +def test_delete_client(test_linode_client, test_longview_client): + client = test_linode_client + label = 
get_test_label(length=8) + longview_client = client.longview.client_create(label=label) + + time.sleep(5) + + res = longview_client.delete() + + assert res + + +def test_get_longview_subscription(test_linode_client, test_longview_client): + subs = test_linode_client.longview.subscriptions() + sub = test_linode_client.load(LongviewSubscription, subs[0].id) + + assert "clients_included" in str(subs.first().__dict__) + + assert re.search("[0-9]+", str(sub.price.hourly)) + assert re.search("[0-9]+", str(sub.price.monthly)) + + assert "longview-3" in str(subs.lists) + assert "longview-10" in str(subs.lists) + assert "longview-40" in str(subs.lists) + assert "longview-100" in str(subs.lists) + + +def test_longview_plan_update_method_not_allowed(test_linode_client): + try: + test_linode_client.longview.longview_plan_update("longview-100") + except ApiError as e: + assert e.status == 405 + assert "Method Not Allowed" in str(e) + + +def test_get_current_longview_plan(test_linode_client): + lv_plan = test_linode_client.load(LongviewPlan, "") + + if lv_plan.label is not None: + assert "Longview" in lv_plan.label + assert "hourly" in lv_plan.price.dict + assert "monthly" in lv_plan.price.dict diff --git a/test/integration/models/maintenance/test_maintenance.py b/test/integration/models/maintenance/test_maintenance.py new file mode 100644 index 000000000..509d06cf6 --- /dev/null +++ b/test/integration/models/maintenance/test_maintenance.py @@ -0,0 +1,12 @@ +def test_get_maintenance_policies(test_linode_client): + client = test_linode_client + + policies = client.maintenance.maintenance_policies() + + assert isinstance(policies, list) + assert all(hasattr(p, "slug") for p in policies) + + slugs = [p.slug for p in policies] + assert any( + slug in slugs for slug in ["linode/migrate", "linode/power_off_on"] + ) diff --git a/test/integration/models/monitor/test_monitor.py b/test/integration/models/monitor/test_monitor.py new file mode 100644 index 000000000..908ac1a44 --- /dev/null 
+++ b/test/integration/models/monitor/test_monitor.py @@ -0,0 +1,277 @@ +import time +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, + wait_for_condition, +) + +import pytest + +from linode_api4 import LinodeClient +from linode_api4.objects import ( + AlertDefinition, + ApiError, + MonitorDashboard, + MonitorMetricsDefinition, + MonitorService, + MonitorServiceToken, +) +from linode_api4.objects.monitor import AlertStatus + + +# List all dashboards +def test_get_all_dashboards(test_linode_client): + client = test_linode_client + dashboards = client.monitor.dashboards() + assert isinstance(dashboards[0], MonitorDashboard) + + dashboard_get = dashboards[0] + get_service_type = dashboard_get.service_type + + # Fetch Dashboard by ID + dashboard_by_id = client.load(MonitorDashboard, 1) + assert isinstance(dashboard_by_id, MonitorDashboard) + assert dashboard_by_id.id == 1 + + # #Fetch Dashboard by service_type + dashboards_by_svc = client.monitor.dashboards(service_type=get_service_type) + assert isinstance(dashboards_by_svc[0], MonitorDashboard) + assert dashboards_by_svc[0].service_type == get_service_type + + +def test_filter_and_group_by(test_linode_client): + client = test_linode_client + dashboards_by_svc = client.monitor.dashboards(service_type="linode") + assert isinstance(dashboards_by_svc[0], MonitorDashboard) + + # Get the first dashboard for linode service type + dashboard = dashboards_by_svc[0] + assert dashboard.service_type == "linode" + + # Ensure the dashboard has widgets + assert hasattr( + dashboard, "widgets" + ), "Dashboard should have widgets attribute" + assert dashboard.widgets is not None, "Dashboard widgets should not be None" + assert ( + len(dashboard.widgets) > 0 + ), "Dashboard should have at least one widget" + + # Test the first widget's group_by and filters fields + widget = dashboard.widgets[0] + + # Test group_by field type + group_by = widget.group_by + assert group_by is None or 
isinstance( + group_by, list + ), "group_by should be None or list type" + if group_by is not None: + for item in group_by: + assert isinstance(item, str), "group_by items should be strings" + + # Test filters field type + filters = widget.filters + assert filters is None or isinstance( + filters, list + ), "filters should be None or list type" + if filters is not None: + from linode_api4.objects.monitor import Filter + + for filter_item in filters: + assert isinstance( + filter_item, Filter + ), "filter items should be Filter objects" + assert hasattr( + filter_item, "dimension_label" + ), "Filter should have dimension_label" + assert hasattr( + filter_item, "operator" + ), "Filter should have operator" + assert hasattr(filter_item, "value"), "Filter should have value" + + +# List supported services +def test_get_supported_services(test_linode_client): + client = test_linode_client + supported_services = client.monitor.services() + assert isinstance(supported_services[0], MonitorService) + + get_supported_service = supported_services[0].service_type + + # Get details for a particular service + service_details = client.load(MonitorService, get_supported_service) + assert isinstance(service_details, MonitorService) + assert service_details.service_type == get_supported_service + + # Get Metric definition details for that particular service + metric_definitions = client.monitor.metric_definitions( + service_type=get_supported_service + ) + assert isinstance(metric_definitions[0], MonitorMetricsDefinition) + + +def test_get_not_supported_service(test_linode_client): + client = test_linode_client + with pytest.raises(RuntimeError) as err: + client.load(MonitorService, "saas") + assert "[404] Not found" in str(err.value) + + +# Test Helpers +def get_db_engine_id(client: LinodeClient, engine: str): + engines = client.database.engines() + engine_id = "" + for e in engines: + if e.engine == engine: + engine_id = e.id + + return str(engine_id) + + 
+@pytest.fixture(scope="session") +def test_create_and_test_db(test_linode_client): + client = test_linode_client + label = get_test_label() + "-sqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "mysql") + dbtype = "g6-standard-1" + + db = client.database.mysql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + ) + + def get_db_status(): + return db.status == "active" + + # TAKES 15-30 MINUTES TO FULLY PROVISION DB + wait_for_condition(60, 2000, get_db_status) + + yield db + send_request_when_resource_available(300, db.delete) + + +def test_my_db_functionality(test_linode_client, test_create_and_test_db): + client = test_linode_client + assert test_create_and_test_db.status == "active" + + entity_id = test_create_and_test_db.id + + # create token for the particular service + token = client.monitor.create_token( + service_type="dbaas", entity_ids=[entity_id] + ) + assert isinstance(token, MonitorServiceToken) + assert len(token.token) > 0, "Token should not be empty" + assert hasattr(token, "token"), "Response object has no 'token' attribute" + + +def test_integration_create_get_update_delete_alert_definition( + test_linode_client, +): + """E2E: create an alert definition, fetch it, update it, then delete it. + + This test attempts to be resilient: it cleans up the created definition + in a finally block so CI doesn't leak resources. 
+ """ + client = test_linode_client + service_type = "dbaas" + label = get_test_label() + "-e2e-alert" + + rule_criteria = { + "rules": [ + { + "aggregate_function": "avg", + "dimension_filters": [ + { + "dimension_label": "node_type", + "label": "Node Type", + "operator": "eq", + "value": "primary", + } + ], + "label": "Memory Usage", + "metric": "memory_usage", + "operator": "gt", + "threshold": 90, + "unit": "percent", + } + ] + } + trigger_conditions = { + "criteria_condition": "ALL", + "evaluation_period_seconds": 300, + "polling_interval_seconds": 300, + "trigger_occurrences": 1, + } + + # Make the label unique and ensure it begins/ends with an alphanumeric char + label = f"{label}-{int(time.time())}" + description = "E2E alert created by SDK integration test" + + # Pick an existing alert channel to attach to the definition; skip if none + channels = list(client.monitor.alert_channels()) + if not channels: + pytest.skip( + "No alert channels available on account for creating alert definitions" + ) + + created = None + + def wait_for_alert_ready(alert_id, service_type: str): + timeout = 360 # maximum time in seconds to wait for alert creation + initial_timeout = 1 + start = time.time() + interval = initial_timeout + alert = client.load(AlertDefinition, alert_id, service_type) + while ( + getattr(alert, "status", None) + != AlertStatus.AlertDefinitionStatusEnabled + and (time.time() - start) < timeout + ): + time.sleep(interval) + interval *= 2 + try: + alert._api_get() + except ApiError as e: + # transient errors while polling; continue until timeout + if e.status != 404: + raise + return alert + + try: + # Create the alert definition using API-compliant top-level fields + created = client.monitor.create_alert_definition( + service_type=service_type, + label=label, + severity=1, + description=description, + channel_ids=[channels[0].id], + rule_criteria=rule_criteria, + trigger_conditions=trigger_conditions, + ) + + assert created.id + assert getattr(created, 
"label", None) == label + + created = wait_for_alert_ready(created.id, service_type) + + updated = client.load(AlertDefinition, created.id, service_type) + updated.label = f"{label}-updated" + updated.save() + + updated = wait_for_alert_ready(updated.id, service_type) + + assert created.id == updated.id + assert updated.label == f"{label}-updated" + + finally: + if created: + # Best-effort cleanup; allow transient errors. + delete_alert = client.load( + AlertDefinition, created.id, service_type + ) + delete_alert.delete() diff --git a/test/integration/models/monitor_api/test_monitor_api.py b/test/integration/models/monitor_api/test_monitor_api.py new file mode 100644 index 000000000..d9fd755b3 --- /dev/null +++ b/test/integration/models/monitor_api/test_monitor_api.py @@ -0,0 +1,11 @@ +def test_monitor_api_fetch_dbaas_metrics(test_monitor_client): + client, entity_ids = test_monitor_client + + metrics = client.metrics.fetch_metrics( + "dbaas", + entity_ids=entity_ids, + metrics=[{"name": "read_iops", "aggregate_function": "avg"}], + relative_time_duration={"unit": "hr", "value": 1}, + ) + + assert metrics.status == "success" diff --git a/test/integration/models/networking/test_networking.py b/test/integration/models/networking/test_networking.py new file mode 100644 index 000000000..27ffbb444 --- /dev/null +++ b/test/integration/models/networking/test_networking.py @@ -0,0 +1,353 @@ +import time +from test.integration.conftest import ( + get_api_ca_file, + get_api_url, + get_region, + get_token, +) +from test.integration.helpers import ( + get_test_label, + retry_sending_request, + wait_for_condition, +) + +import pytest + +from linode_api4 import Instance, LinodeClient +from linode_api4.objects import Config, ConfigInterfaceIPv4, Firewall, IPAddress +from linode_api4.objects.networking import ( + FirewallCreateDevicesOptions, + NetworkTransferPrice, + Price, +) + +TEST_REGION = get_region( + LinodeClient( + token=get_token(), + base_url=get_api_url(), + 
ca_path=get_api_ca_file(), + ), + {"Linodes", "Cloud Firewall"}, + site_type="core", +) + + +def create_linode_func(test_linode_client): + client = test_linode_client + + label = get_test_label() + + linode_instance, _ = client.linode.instance_create( + "g6-nanode-1", + TEST_REGION, + image="linode/debian12", + label=label, + ) + + return linode_instance + + +@pytest.fixture +def create_linode_for_ip_share(test_linode_client): + linode = create_linode_func(test_linode_client) + + yield linode + + linode.delete() + + +@pytest.fixture +def create_linode_to_be_shared_with_ips(test_linode_client): + linode = create_linode_func(test_linode_client) + + yield linode + + linode.delete() + + +@pytest.mark.smoke +def test_get_networking_rules(test_linode_client, test_firewall): + firewall = test_linode_client.load(Firewall, test_firewall.id) + + rules = firewall.get_rules() + + assert "inbound" in str(rules) + assert "inbound_policy" in str(rules) + assert "outbound" in str(rules) + assert "outbound_policy" in str(rules) + + +@pytest.fixture +def create_linode_without_firewall(test_linode_client): + """ + WARNING: This is specifically reserved for Firewall testing. + Don't use this if the Linode will not be assigned to a firewall. 
+ """ + + client = test_linode_client + region = get_region(client, {"Cloud Firewall"}, "core").id + + label = get_test_label() + + instance = client.linode.instance_create( + "g6-nanode-1", + region, + label=label, + ) + + yield client, instance + + instance.delete() + + +@pytest.fixture +def create_firewall_with_device(create_linode_without_firewall): + client, target_instance = create_linode_without_firewall + + firewall = client.networking.firewall_create( + get_test_label(), + rules={ + "inbound_policy": "DROP", + "outbound_policy": "DROP", + }, + devices=FirewallCreateDevicesOptions(linodes=[target_instance.id]), + ) + + yield firewall, target_instance + + firewall.delete() + + +def test_get_networking_rule_versions(test_linode_client, test_firewall): + firewall = test_linode_client.load(Firewall, test_firewall.id) + + # Update the firewall's rules + new_rules = { + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": ["0.0.0.0/0"], + "ipv6": ["ff00::/8"], + }, + "description": "A really cool firewall rule.", + "label": "really-cool-firewall-rule", + "ports": "80", + "protocol": "TCP", + } + ], + "inbound_policy": "ACCEPT", + "outbound": [], + "outbound_policy": "DROP", + } + firewall.update_rules(new_rules) + time.sleep(1) + + rule_versions = firewall.rule_versions + + # Original firewall rules + old_rule_version = firewall.get_rule_version(1) + + # Updated firewall rules + new_rule_version = firewall.get_rule_version(2) + + assert "rules" in str(rule_versions) + assert "version" in str(rule_versions) + assert rule_versions["results"] == 2 + + assert old_rule_version["inbound"] == [] + assert old_rule_version["inbound_policy"] == "ACCEPT" + assert old_rule_version["outbound"] == [] + assert old_rule_version["outbound_policy"] == "DROP" + assert old_rule_version["version"] == 1 + + assert ( + new_rule_version["inbound"][0]["description"] + == "A really cool firewall rule." 
+ ) + assert new_rule_version["inbound_policy"] == "ACCEPT" + assert new_rule_version["outbound"] == [] + assert new_rule_version["outbound_policy"] == "DROP" + assert new_rule_version["version"] == 2 + + +@pytest.mark.smoke +def test_ip_addresses_share( + test_linode_client, + create_linode_for_ip_share, + create_linode_to_be_shared_with_ips, +): + """ + Test that you can share IP addresses with Linode. + """ + + # create two linode instances and share the ip of instance1 with instance2 + linode_instance1 = create_linode_for_ip_share + linode_instance2 = create_linode_to_be_shared_with_ips + + test_linode_client.networking.ip_addresses_share( + [linode_instance1.ips.ipv4.public[0]], linode_instance2.id + ) + + assert ( + linode_instance1.ips.ipv4.public[0].address + == linode_instance2.ips.ipv4.shared[0].address + ) + + +@pytest.mark.smoke +def test_ip_addresses_unshare( + test_linode_client, + create_linode_for_ip_share, + create_linode_to_be_shared_with_ips, +): + """ + Test that you can unshare IP addresses with Linode. 
+ """ + + # create two linode instances and share the ip of instance1 with instance2 + linode_instance1 = create_linode_for_ip_share + linode_instance2 = create_linode_to_be_shared_with_ips + + test_linode_client.networking.ip_addresses_share( + [linode_instance1.ips.ipv4.public[0]], linode_instance2.id + ) + + # unshared the ip with instance2 + test_linode_client.networking.ip_addresses_share([], linode_instance2.id) + + assert [] == linode_instance2.ips.ipv4.shared + + +def test_ip_info_vpc(test_linode_client, create_vpc_with_subnet_and_linode): + vpc, subnet, linode, _ = create_vpc_with_subnet_and_linode + + config: Config = linode.configs[0] + + config.interfaces = [] + config.save() + + _ = config.interface_create_vpc( + subnet=subnet, + primary=True, + ipv4=ConfigInterfaceIPv4(vpc="10.0.0.2", nat_1_1="any"), + ip_ranges=["10.0.0.5/32"], + ) + + config.invalidate() + + ip_info = test_linode_client.load(IPAddress, linode.ipv4[0]) + + assert ip_info.vpc_nat_1_1.address == "10.0.0.2" + assert ip_info.vpc_nat_1_1.vpc_id == vpc.id + assert ip_info.vpc_nat_1_1.subnet_id == subnet.id + + +def test_network_transfer_prices(test_linode_client): + transfer_prices = test_linode_client.networking.transfer_prices() + + if len(transfer_prices) > 0: + assert type(transfer_prices[0]) is NetworkTransferPrice + assert type(transfer_prices[0].price) is Price + assert ( + transfer_prices[0].price is None + or transfer_prices[0].price.hourly >= 0 + ) + + +def test_allocate_and_delete_ip(test_linode_client, create_linode): + linode = create_linode + ip = test_linode_client.networking.ip_allocate(linode.id) + linode.invalidate() + + assert ip.linode_id == linode.id + assert ip.address in linode.ipv4 + + is_deleted = ip.delete() + + assert is_deleted is True + + +def get_status(linode: Instance, status: str): + return linode.status == status + + +def test_create_and_delete_vlan(test_linode_client, linode_for_vlan_tests): + linode = linode_for_vlan_tests + + config: Config = 
linode.configs[0] + + config.interfaces = [] + config.save() + + vlan_label = f"{get_test_label(8)}-testvlan" + interface = config.interface_create_vlan( + label=vlan_label, ipam_address="10.0.0.2/32" + ) + + config.invalidate() + + assert interface.id == config.interfaces[0].id + assert interface.purpose == "vlan" + assert interface.label == vlan_label + + # Remove the VLAN interface and reboot Linode + config.interfaces = [] + config.save() + + wait_for_condition(3, 100, get_status, linode, "running") + + retry_sending_request(3, linode.reboot) + + wait_for_condition(3, 100, get_status, linode, "rebooting") + assert linode.status == "rebooting" + + wait_for_condition(3, 100, get_status, linode, "running") + + # Delete the VLAN + is_deleted = test_linode_client.networking.delete_vlan( + vlan_label, linode.region + ) + + assert is_deleted is True + + +def test_create_firewall_with_linode_device(create_firewall_with_device): + firewall, target_instance = create_firewall_with_device + + devices = firewall.devices + + assert len(devices) == 1 + assert devices[0].entity.id == target_instance.id + + +# TODO (Enhanced Interfaces): Add test for interface device + + +def test_get_global_firewall_settings(test_linode_client): + settings = test_linode_client.networking.firewall_settings() + + assert settings.default_firewall_ids is not None + assert all( + k in {"vpc_interface", "public_interface", "linode", "nodebalancer"} + for k in vars(settings.default_firewall_ids).keys() + ) + + +def test_ip_info(test_linode_client, create_linode): + linode = create_linode + wait_for_condition(3, 100, get_status, linode, "running") + + ip_info = test_linode_client.load(IPAddress, linode.ipv4[0]) + + assert ip_info.address == linode.ipv4[0] + assert ip_info.gateway is not None + assert ip_info.linode_id == linode.id + assert ip_info.interface_id is None + assert ip_info.prefix == 24 + assert ip_info.public + assert ip_info.rdns is not None + assert ip_info.region.id == linode.region.id 
+ assert ip_info.subnet_mask is not None + assert ip_info.type == "ipv4" + assert ip_info.vpc_nat_1_1 is None diff --git a/test/integration/models/nodebalancer/test_nodebalancer.py b/test/integration/models/nodebalancer/test_nodebalancer.py new file mode 100644 index 000000000..692efb027 --- /dev/null +++ b/test/integration/models/nodebalancer/test_nodebalancer.py @@ -0,0 +1,274 @@ +import re +from test.integration.conftest import ( + get_api_ca_file, + get_api_url, + get_region, + get_token, +) +from test.integration.helpers import get_test_label + +import pytest + +from linode_api4 import ApiError, LinodeClient, NodeBalancer +from linode_api4.objects import ( + NodeBalancerConfig, + NodeBalancerNode, + NodeBalancerType, + RegionPrice, +) + +TEST_REGION = get_region( + LinodeClient( + token=get_token(), + base_url=get_api_url(), + ca_path=get_api_ca_file(), + ), + {"Linodes", "Cloud Firewall", "NodeBalancers"}, + site_type="core", +) + + +@pytest.fixture(scope="session") +def linode_with_private_ip(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + TEST_REGION, + image="linode/debian12", + label=label, + private_ip=True, + firewall=e2e_test_firewall, + ) + + yield linode_instance + + linode_instance.delete() + + +@pytest.fixture(scope="session") +def create_nb_config(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + nb = client.nodebalancer_create( + region=TEST_REGION, label=label, firewall=e2e_test_firewall.id + ) + + config = nb.config_create() + + yield config + + config.delete() + nb.delete() + + +@pytest.fixture(scope="session") +def create_nb_config_with_udp(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + nb = client.nodebalancer_create( + region=TEST_REGION, label=label, firewall=e2e_test_firewall.id + ) + + config = 
nb.config_create(protocol="udp", udp_check_port=1234) + + yield config + + config.delete() + nb.delete() + + +@pytest.fixture(scope="session") +def create_nb(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + nb = client.nodebalancer_create( + region=TEST_REGION, label=label, firewall=e2e_test_firewall.id + ) + + yield nb + + nb.delete() + + +def test_create_nb(test_linode_client, e2e_test_firewall): + client = test_linode_client + label = get_test_label(8) + + nb = client.nodebalancer_create( + region=TEST_REGION, + label=label, + firewall=e2e_test_firewall.id, + client_udp_sess_throttle=5, + ) + + assert TEST_REGION, nb.region + assert label == nb.label + assert 5 == nb.client_udp_sess_throttle + + nb.delete() + + +def test_get_nodebalancer_config(test_linode_client, create_nb_config): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config.id, + create_nb_config.nodebalancer_id, + ) + + +def test_get_nb_config_with_udp(test_linode_client, create_nb_config_with_udp): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config_with_udp.id, + create_nb_config_with_udp.nodebalancer_id, + ) + + assert "udp" == config.protocol + assert 1234 == config.udp_check_port + assert 2 == config.udp_session_timeout + + +def test_update_nb_config(test_linode_client, create_nb_config_with_udp): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config_with_udp.id, + create_nb_config_with_udp.nodebalancer_id, + ) + + config.udp_check_port = 4321 + config.save() + + config_updated = test_linode_client.load( + NodeBalancerConfig, + create_nb_config_with_udp.id, + create_nb_config_with_udp.nodebalancer_id, + ) + + assert 4321 == config_updated.udp_check_port + + +def test_get_nb(test_linode_client, create_nb): + nb = test_linode_client.load( + NodeBalancer, + create_nb.id, + ) + + assert nb.id == create_nb.id + + +def test_update_nb(test_linode_client, create_nb): + nb = 
test_linode_client.load( + NodeBalancer, + create_nb.id, + ) + + new_label = f"{nb.label}-ThisNewLabel" + + nb.label = new_label + nb.client_udp_sess_throttle = 5 + nb.save() + + nb_updated = test_linode_client.load( + NodeBalancer, + create_nb.id, + ) + + assert new_label == nb_updated.label + assert 5 == nb_updated.client_udp_sess_throttle + + +@pytest.mark.smoke +def test_create_nb_node( + test_linode_client, create_nb_config, linode_with_private_ip +): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config.id, + create_nb_config.nodebalancer_id, + ) + linode = linode_with_private_ip + address = [a for a in linode.ipv4 if re.search("192.168.+", a)][0] + node = config.node_create( + "node_test", address + ":80", weight=50, mode="accept" + ) + + assert re.search("192.168.+:[0-9]+", node.address) + assert "node_test" == node.label + + +@pytest.mark.smoke +def test_get_nb_node(test_linode_client, create_nb_config): + node = test_linode_client.load( + NodeBalancerNode, + create_nb_config.nodes[0].id, + (create_nb_config.id, create_nb_config.nodebalancer_id), + ) + + +def test_update_nb_node(test_linode_client, create_nb_config): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config.id, + create_nb_config.nodebalancer_id, + ) + node = config.nodes[0] + + new_label = f"{node.label}-ThisNewLabel" + + node.label = new_label + node.weight = 50 + node.mode = "accept" + node.save() + + node_updated = test_linode_client.load( + NodeBalancerNode, + create_nb_config.nodes[0].id, + (create_nb_config.id, create_nb_config.nodebalancer_id), + ) + + assert new_label == node_updated.label + assert 50 == node_updated.weight + assert "accept" == node_updated.mode + + +def test_delete_nb_node(test_linode_client, create_nb_config): + config = test_linode_client.load( + NodeBalancerConfig, + create_nb_config.id, + create_nb_config.nodebalancer_id, + ) + node = config.nodes[0] + + node.delete() + + with pytest.raises(ApiError) as e: + 
test_linode_client.load( + NodeBalancerNode, + create_nb_config.nodes[0].id, + (create_nb_config.id, create_nb_config.nodebalancer_id), + ) + assert "Not Found" in str(e.json) + + +def test_nodebalancer_types(test_linode_client): + types = test_linode_client.nodebalancers.types() + + if len(types) > 0: + for nb_type in types: + assert type(nb_type) is NodeBalancerType + assert nb_type.price.monthly is None or ( + isinstance(nb_type.price.monthly, (float, int)) + and nb_type.price.monthly >= 0 + ) + if len(nb_type.region_prices) > 0: + region_price = nb_type.region_prices[0] + assert type(region_price) is RegionPrice + assert region_price.monthly is None or ( + isinstance(region_price.monthly, (float, int)) + and region_price.monthly >= 0 + ) diff --git a/test/integration/models/object_storage/test_obj.py b/test/integration/models/object_storage/test_obj.py new file mode 100644 index 000000000..047dfbdb4 --- /dev/null +++ b/test/integration/models/object_storage/test_obj.py @@ -0,0 +1,215 @@ +import time +from test.integration.helpers import send_request_when_resource_available + +import pytest + +from linode_api4.common import RegionPrice +from linode_api4.linode_client import LinodeClient +from linode_api4.objects.object_storage import ( + ObjectStorageACL, + ObjectStorageBucket, + ObjectStorageCluster, + ObjectStorageEndpointType, + ObjectStorageKeyPermission, + ObjectStorageKeys, + ObjectStorageType, +) + + +@pytest.fixture(scope="session") +def region(test_linode_client: LinodeClient): + return "us-southeast" # uncomment get_region(test_linode_client, {"Object Storage"}).id + + +@pytest.fixture(scope="session") +def endpoints(test_linode_client: LinodeClient): + return test_linode_client.object_storage.endpoints() + + +@pytest.fixture(scope="session") +def bucket( + test_linode_client: LinodeClient, region: str +) -> ObjectStorageBucket: + bucket = test_linode_client.object_storage.bucket_create( + cluster_or_region=region, + label="bucket-" + 
str(time.time_ns()), + acl=ObjectStorageACL.PRIVATE, + cors_enabled=False, + ) + + yield bucket + send_request_when_resource_available(timeout=100, func=bucket.delete) + + +@pytest.fixture(scope="session") +def bucket_with_endpoint( + test_linode_client: LinodeClient, endpoints +) -> ObjectStorageBucket: + selected_endpoint = next( + ( + e + for e in endpoints + if e.endpoint_type == ObjectStorageEndpointType.E1 + ), + None, + ) + + bucket = test_linode_client.object_storage.bucket_create( + cluster_or_region=selected_endpoint.region, + label="bucket-" + str(time.time_ns()), + acl=ObjectStorageACL.PRIVATE, + cors_enabled=False, + endpoint_type=selected_endpoint.endpoint_type, + ) + + yield bucket + + send_request_when_resource_available(timeout=100, func=bucket.delete) + + +@pytest.fixture(scope="session") +def obj_key(test_linode_client: LinodeClient): + key = test_linode_client.object_storage.keys_create( + label="obj-key-" + str(time.time_ns()), + ) + + yield key + key.delete() + + +@pytest.fixture(scope="session") +def obj_limited_key( + test_linode_client: LinodeClient, region: str, bucket: ObjectStorageBucket +): + key = test_linode_client.object_storage.keys_create( + label="obj-limited-key-" + str(time.time_ns()), + bucket_access=test_linode_client.object_storage.bucket_access( + cluster_or_region=region, + bucket_name=bucket.label, + permissions=ObjectStorageKeyPermission.READ_ONLY, + ), + regions=[region], + ) + + yield key + key.delete() + + +def test_keys( + test_linode_client: LinodeClient, + obj_key: ObjectStorageKeys, + obj_limited_key: ObjectStorageKeys, +): + loaded_key = test_linode_client.load(ObjectStorageKeys, obj_key.id) + loaded_limited_key = test_linode_client.load( + ObjectStorageKeys, obj_limited_key.id + ) + + assert loaded_key.label == obj_key.label + assert loaded_limited_key.label == obj_limited_key.label + assert ( + loaded_limited_key.regions[0].endpoint_type + in ObjectStorageEndpointType.__members__.values() + ) + + +def 
test_bucket(test_linode_client: LinodeClient, bucket: ObjectStorageBucket): + loaded_bucket = test_linode_client.load( + ObjectStorageBucket, + target_id=bucket.label, + target_parent_id=bucket.region, + ) + + assert loaded_bucket.label == bucket.label + assert loaded_bucket.region == bucket.region + + +def test_bucket_with_endpoint( + test_linode_client: LinodeClient, bucket_with_endpoint: ObjectStorageBucket +): + loaded_bucket = test_linode_client.load( + ObjectStorageBucket, + target_id=bucket_with_endpoint.label, + target_parent_id=bucket_with_endpoint.region, + ) + + assert loaded_bucket.label == bucket_with_endpoint.label + assert loaded_bucket.region == bucket_with_endpoint.region + assert loaded_bucket.s3_endpoint is not None + assert loaded_bucket.endpoint_type == "E1" + + +def test_buckets_in_region( + test_linode_client: LinodeClient, + bucket: ObjectStorageBucket, + region: str, +): + buckets = test_linode_client.object_storage.buckets_in_region(region=region) + assert len(buckets) >= 1 + assert any(b.label == bucket.label for b in buckets) + + +@pytest.mark.smoke +def test_list_obj_storage_bucket( + test_linode_client: LinodeClient, + bucket: ObjectStorageBucket, +): + buckets = test_linode_client.object_storage.buckets() + target_bucket_id = bucket.id + assert any(target_bucket_id == b.id for b in buckets) + + +def test_bucket_access_get(bucket: ObjectStorageBucket): + access = bucket.access_get() + + assert access.acl is not None + assert access.acl_xml is not None + assert access.cors_enabled is not None + + +def test_bucket_access_modify(bucket: ObjectStorageBucket): + bucket.access_modify(ObjectStorageACL.PRIVATE, cors_enabled=True) + + +def test_bucket_access_update(bucket: ObjectStorageBucket): + bucket.access_update(ObjectStorageACL.PRIVATE, cors_enabled=True) + + +def test_get_ssl_cert(bucket: ObjectStorageBucket): + assert not bucket.ssl_cert().ssl + + +def test_get_cluster( + test_linode_client: LinodeClient, bucket: ObjectStorageBucket +): 
+ cluster = test_linode_client.load(ObjectStorageCluster, bucket.cluster) + + assert "linodeobjects.com" in cluster.domain + assert cluster.id == bucket.cluster + assert "available" == cluster.status + + +def test_get_buckets_in_cluster( + test_linode_client: LinodeClient, bucket: ObjectStorageBucket +): + cluster = test_linode_client.load(ObjectStorageCluster, bucket.cluster) + assert any(bucket.id == b.id for b in cluster.buckets_in_cluster()) + + +def test_object_storage_types(test_linode_client): + types = test_linode_client.object_storage.types() + + if len(types) > 0: + for object_storage_type in types: + assert type(object_storage_type) is ObjectStorageType + assert object_storage_type.price.monthly is None or ( + isinstance(object_storage_type.price.monthly, (float, int)) + and object_storage_type.price.monthly >= 0 + ) + if len(object_storage_type.region_prices) > 0: + region_price = object_storage_type.region_prices[0] + assert type(region_price) is RegionPrice + assert object_storage_type.price.monthly is None or ( + isinstance(object_storage_type.price.monthly, (float, int)) + and object_storage_type.price.monthly >= 0 + ) diff --git a/test/integration/models/object_storage/test_obj_quotas.py b/test/integration/models/object_storage/test_obj_quotas.py new file mode 100644 index 000000000..10a546bc7 --- /dev/null +++ b/test/integration/models/object_storage/test_obj_quotas.py @@ -0,0 +1,45 @@ +import pytest + +from linode_api4.objects.object_storage import ( + ObjectStorageQuota, + ObjectStorageQuotaUsage, +) + + +def test_list_and_get_obj_storage_quotas(test_linode_client): + quotas = test_linode_client.object_storage.quotas() + + if len(quotas) < 1: + pytest.skip("No available quota for testing. 
Skipping now...") + + found_quota = quotas[0] + + get_quota = test_linode_client.load( + ObjectStorageQuota, found_quota.quota_id + ) + + assert found_quota.quota_id == get_quota.quota_id + assert found_quota.quota_name == get_quota.quota_name + assert found_quota.endpoint_type == get_quota.endpoint_type + assert found_quota.s3_endpoint == get_quota.s3_endpoint + assert found_quota.description == get_quota.description + assert found_quota.quota_limit == get_quota.quota_limit + assert found_quota.resource_metric == get_quota.resource_metric + + +def test_get_obj_storage_quota_usage(test_linode_client): + quotas = test_linode_client.object_storage.quotas() + + if len(quotas) < 1: + pytest.skip("No available quota for testing. Skipping now...") + + quota_id = quotas[0].quota_id + quota = test_linode_client.load(ObjectStorageQuota, quota_id) + + quota_usage = quota.usage() + + assert isinstance(quota_usage, ObjectStorageQuotaUsage) + assert quota_usage.quota_limit >= 0 + + if quota_usage.usage is not None: + assert quota_usage.usage >= 0 diff --git a/test/integration/models/placement/test_placement.py b/test/integration/models/placement/test_placement.py new file mode 100644 index 000000000..21c6519f5 --- /dev/null +++ b/test/integration/models/placement/test_placement.py @@ -0,0 +1,113 @@ +from test.integration.conftest import get_region +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, +) + +import pytest + +from linode_api4 import ( + MigratedInstance, + MigrationType, + PlacementGroup, + PlacementGroupPolicy, + PlacementGroupType, +) + + +@pytest.mark.smoke +def test_get_pg(test_linode_client, create_placement_group): + """ + Tests that a Placement Group can be loaded. 
+ """ + pg = test_linode_client.load(PlacementGroup, create_placement_group.id) + assert pg.id == create_placement_group.id + + +@pytest.mark.smoke +def test_update_pg(test_linode_client, create_placement_group): + """ + Tests that a Placement Group can be updated successfully. + """ + pg = create_placement_group + new_label = create_placement_group.label + "-updated" + + pg.label = new_label + pg.save() + + pg = test_linode_client.load(PlacementGroup, pg.id) + + assert pg.label == new_label + + +def test_pg_assignment(test_linode_client, create_placement_group_with_linode): + """ + Tests that a Placement Group can be updated successfully. + """ + pg, inst = create_placement_group_with_linode + + assert pg.members[0].linode_id == inst.id + assert inst.placement_group.id == pg.id + + pg.unassign([inst]) + inst.invalidate() + + assert len(pg.members) == 0 + assert inst.placement_group is None + + pg.assign([inst]) + inst.invalidate() + + assert pg.members[0].linode_id == inst.id + assert inst.placement_group.id == pg.id + + +def test_pg_migration( + test_linode_client, e2e_test_firewall, create_placement_group +): + """ + Tests that an instance can be migrated into and our of PGs successfully. 
+ """ + client = test_linode_client + + label_pg = get_test_label(10) + + label_instance = get_test_label(10) + + pg_outbound = client.placement.group_create( + label_pg, + get_region(test_linode_client, {"Placement Group"}), + PlacementGroupType.anti_affinity_local, + PlacementGroupPolicy.flexible, + ) + + linode = client.linode.instance_create( + "g6-nanode-1", + pg_outbound.region, + label=label_instance, + placement_group=pg_outbound, + ) + + pg_inbound = create_placement_group + + # Says it could take up to ~6 hrs for migration to fully complete + send_request_when_resource_available( + 300, + linode.initiate_migration, + placement_group=pg_inbound.id, + migration_type=MigrationType.COLD, + region=pg_inbound.region, + ) + + pg_inbound = test_linode_client.load(PlacementGroup, pg_inbound.id) + pg_outbound = test_linode_client.load(PlacementGroup, pg_outbound.id) + + assert pg_inbound.migrations.inbound[0] == MigratedInstance( + linode_id=linode.id + ) + assert pg_outbound.migrations.outbound[0] == MigratedInstance( + linode_id=linode.id + ) + + linode.delete() + pg_outbound.delete() diff --git a/test/integration/models/profile/test_profile.py b/test/integration/models/profile/test_profile.py new file mode 100644 index 000000000..6942eea38 --- /dev/null +++ b/test/integration/models/profile/test_profile.py @@ -0,0 +1,42 @@ +import pytest + +from linode_api4.objects import PersonalAccessToken, Profile, SSHKey + + +@pytest.mark.smoke +def test_user_profile(test_linode_client): + client = test_linode_client + + profile = client.profile() + + assert isinstance(profile, Profile) + + +def test_get_personal_access_token_objects(test_linode_client): + client = test_linode_client + + personal_access_tokens = client.profile.tokens() + + if len(personal_access_tokens) > 0: + assert isinstance(personal_access_tokens[0], PersonalAccessToken) + + +@pytest.mark.smoke +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_get_sshkeys(test_linode_client, test_sshkey): + client 
= test_linode_client + + ssh_keys = client.profile.ssh_keys() + + ssh_labels = [i.label for i in ssh_keys] + + assert isinstance(test_sshkey, SSHKey) + assert test_sshkey.label in ssh_labels + + +@pytest.mark.flaky(reruns=3, reruns_delay=2) +def test_ssh_key_create(test_sshkey, ssh_key_gen): + pub_key = ssh_key_gen[0] + key = test_sshkey + + assert pub_key == key._raw_json["ssh_key"] diff --git a/test/integration/models/region/test_region.py b/test/integration/models/region/test_region.py new file mode 100644 index 000000000..d9d4006a7 --- /dev/null +++ b/test/integration/models/region/test_region.py @@ -0,0 +1,62 @@ +import pytest + +from linode_api4.objects import Region + + +@pytest.mark.smoke +def test_list_regions_vpc_availability(test_linode_client): + """ + Test listing VPC availability for all regions. + """ + client = test_linode_client + + vpc_availability = client.regions.vpc_availability() + + assert len(vpc_availability) > 0 + + for entry in vpc_availability: + assert entry.region is not None + assert len(entry.region) > 0 + assert entry.available is not None + assert isinstance(entry.available, bool) + # available_ipv6_prefix_lengths may be empty list but should exist + assert entry.available_ipv6_prefix_lengths is not None + assert isinstance(entry.available_ipv6_prefix_lengths, list) + + +@pytest.mark.smoke +def test_get_region_vpc_availability_via_object(test_linode_client): + """ + Test getting VPC availability via the Region object property. 
+ """ + client = test_linode_client + + # Get the first available region + regions = client.regions() + assert len(regions) > 0 + test_region_id = regions[0].id + + region = Region(client, test_region_id) + vpc_avail = region.vpc_availability + + assert vpc_avail is not None + assert vpc_avail.region == test_region_id + assert vpc_avail.available is not None + assert isinstance(vpc_avail.available, bool) + assert vpc_avail.available_ipv6_prefix_lengths is not None + assert isinstance(vpc_avail.available_ipv6_prefix_lengths, list) + + +def test_vpc_availability_available_regions(test_linode_client): + """ + Test that some regions have VPC availability enabled. + """ + client = test_linode_client + + vpc_availability = client.regions.vpc_availability() + + # Filter for regions where VPC is available + available_regions = [v for v in vpc_availability if v.available] + + # There should be at least some regions with VPC available + assert len(available_regions) > 0 diff --git a/test/integration/models/sharegroups/test_sharegroups.py b/test/integration/models/sharegroups/test_sharegroups.py new file mode 100644 index 000000000..9c66bad90 --- /dev/null +++ b/test/integration/models/sharegroups/test_sharegroups.py @@ -0,0 +1,251 @@ +import datetime +from test.integration.conftest import get_region +from test.integration.helpers import ( + get_test_label, +) + +import pytest + +from linode_api4.objects import ( + Image, + ImageShareGroup, + ImageShareGroupImagesToAdd, + ImageShareGroupImageToAdd, + ImageShareGroupImageToUpdate, + ImageShareGroupMemberToAdd, + ImageShareGroupMemberToUpdate, + ImageShareGroupToken, +) + + +def wait_for_image_status( + test_linode_client, image_id, expected_status, timeout=360, interval=5 +): + import time + + get_image = test_linode_client.load(Image, image_id) + timer = 0 + while get_image.status != expected_status and timer < timeout: + time.sleep(interval) + timer += interval + get_image = test_linode_client.load(Image, image_id) + if 
timer >= timeout: + raise TimeoutError( + f"Created image did not reach status '{expected_status}' within {timeout} seconds." + ) + + +@pytest.fixture(scope="class") +def sample_linode(test_linode_client, e2e_test_firewall): + client = test_linode_client + region = get_region(client, {"Linodes", "Cloud Firewall"}, site_type="core") + label = get_test_label(length=8) + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + region, + image="linode/alpine3.19", + label=label + "_modlinode", + ) + yield linode_instance + linode_instance.delete() + + +@pytest.fixture(scope="class") +def create_image_id(test_linode_client, sample_linode): + create_image = test_linode_client.images.create( + sample_linode.disks[0], + label="linode-api4python-test-image-sharing-image", + ) + wait_for_image_status(test_linode_client, create_image.id, "available") + yield create_image.id + create_image.delete() + + +@pytest.fixture(scope="function") +def share_group_id(test_linode_client): + group_label = get_test_label(8) + "_sharegroup_api4_test" + group = test_linode_client.sharegroups.create_sharegroup( + label=group_label, + description="Test api4python", + ) + yield group.id + group.delete() + + +def test_get_share_groups(test_linode_client, share_group_id): + response = test_linode_client.sharegroups() + sharegroups_list = response.lists[0] + assert len(sharegroups_list) > 0 + assert sharegroups_list[0].api_endpoint == "/images/sharegroups/{id}" + assert sharegroups_list[0].id > 0 + assert sharegroups_list[0].description != "" + assert isinstance(sharegroups_list[0].images_count, int) + assert not sharegroups_list[0].is_suspended + assert sharegroups_list[0].label != "" + assert isinstance(sharegroups_list[0].members_count, int) + assert sharegroups_list[0].uuid != "" + assert isinstance(sharegroups_list[0].created, datetime.date) + assert not sharegroups_list[0].expiry + + +def test_add_update_remove_share_group(test_linode_client): + group_label = 
get_test_label(8) + "_sharegroup_api4_test" + share_group = test_linode_client.sharegroups.create_sharegroup( + label=group_label, + description="Test api4python create", + ) + assert share_group.api_endpoint == "/images/sharegroups/{id}" + assert share_group.id > 0 + assert share_group.description == "Test api4python create" + assert isinstance(share_group.images_count, int) + assert not share_group.is_suspended + assert share_group.label == group_label + assert isinstance(share_group.members_count, int) + assert share_group.uuid != "" + assert isinstance(share_group.created, datetime.date) + assert not share_group.updated + assert not share_group.expiry + + load_share_group = test_linode_client.load(ImageShareGroup, share_group.id) + assert load_share_group.id == share_group.id + assert load_share_group.description == "Test api4python create" + + load_share_group.label = "Updated Sharegroup Label" + load_share_group.description = "Test update description" + load_share_group.save() + load_share_group_after_update = test_linode_client.load( + ImageShareGroup, share_group.id + ) + assert load_share_group_after_update.id == share_group.id + assert load_share_group_after_update.label == "Updated Sharegroup Label" + assert ( + load_share_group_after_update.description == "Test update description" + ) + + share_group.delete() + with pytest.raises(RuntimeError) as err: + test_linode_client.load(ImageShareGroup, share_group.id) + assert "[404] Not found" in str(err.value) + + +def test_add_get_update_revoke_image_to_share_group( + test_linode_client, create_image_id, share_group_id +): + share_group = test_linode_client.load(ImageShareGroup, share_group_id) + add_image_response = share_group.add_images( + ImageShareGroupImagesToAdd( + images=[ + ImageShareGroupImageToAdd(id=create_image_id), + ] + ) + ) + assert 0 < len(add_image_response) + assert ( + add_image_response[0].image_sharing.shared_by.sharegroup_id + == share_group.id + ) + assert ( + 
add_image_response[0].image_sharing.shared_by.source_image_id + == create_image_id + ) + + get_response = share_group.get_image_shares() + assert 0 < len(get_response) + assert ( + get_response[0].image_sharing.shared_by.sharegroup_id == share_group.id + ) + assert ( + get_response[0].image_sharing.shared_by.source_image_id + == create_image_id + ) + assert get_response[0].description == "" + + update_response = share_group.update_image_share( + ImageShareGroupImageToUpdate( + image_share_id=get_response[0].id, description="Description update" + ) + ) + assert update_response.description == "Description update" + + share_groups_by_image_id_response = ( + test_linode_client.sharegroups.sharegroups_by_image_id(create_image_id) + ) + assert 0 < len(share_groups_by_image_id_response.lists) + assert share_groups_by_image_id_response.lists[0][0].id == share_group.id + + share_group.revoke_image_share(get_response[0].id) + get_after_revoke_response = share_group.get_image_shares() + assert len(get_after_revoke_response) == 0 + + +def test_list_tokens(test_linode_client): + response = test_linode_client.sharegroups.tokens() + assert response.page_endpoint == "images/sharegroups/tokens" + assert len(response.lists[0]) >= 0 + + +def test_create_token_to_own_share_group_error(test_linode_client): + group_label = get_test_label(8) + "_sharegroup_api4_test" + response_create_share_group = ( + test_linode_client.sharegroups.create_sharegroup( + label=group_label, + description="Test api4python create", + ) + ) + with pytest.raises(RuntimeError) as err: + test_linode_client.sharegroups.create_token( + response_create_share_group.uuid + ) + assert "[400] valid_for_sharegroup_uuid" in str(err.value) + assert "You may not create a token for your own sharegroup" in str( + err.value + ) + + response_create_share_group.delete() + + +def test_get_invalid_token(test_linode_client): + with pytest.raises(RuntimeError) as err: + test_linode_client.load(ImageShareGroupToken, 
"36b0-4d52_invalid") + assert "[404] Not found" in str(err.value) + + +def test_try_to_add_member_invalid_token(test_linode_client, share_group_id): + share_group = test_linode_client.load(ImageShareGroup, share_group_id) + with pytest.raises(RuntimeError) as err: + share_group.add_member( + ImageShareGroupMemberToAdd( + token="not_existing_token", + label="New Member", + ) + ) + assert "[500] Invalid token format" in str(err.value) + + +def test_list_share_group_members(test_linode_client, share_group_id): + share_group = test_linode_client.load(ImageShareGroup, share_group_id) + response = share_group.get_members() + assert 0 == len(response) + + +def test_try_to_get_update_revoke_share_group_member_by_invalid_token( + test_linode_client, share_group_id +): + share_group = test_linode_client.load(ImageShareGroup, share_group_id) + with pytest.raises(RuntimeError) as err: + share_group.get_member("not_existing_token") + assert "[404] Not found" in str(err.value) + + with pytest.raises(RuntimeError) as err: + share_group.update_member( + ImageShareGroupMemberToUpdate( + token_uuid="not_existing_token", + label="Update Member", + ) + ) + assert "[404] Not found" in str(err.value) + + with pytest.raises(RuntimeError) as err: + share_group.remove_member("not_existing_token") + assert "[404] Not found" in str(err.value) diff --git a/test/integration/models/tag/test_tag.py b/test/integration/models/tag/test_tag.py new file mode 100644 index 000000000..d2edf84c5 --- /dev/null +++ b/test/integration/models/tag/test_tag.py @@ -0,0 +1,22 @@ +from test.integration.helpers import get_test_label + +import pytest + +from linode_api4.objects import Tag + + +@pytest.fixture +def test_tag(test_linode_client): + unique_tag = get_test_label() + "_tag" + tag = test_linode_client.tag_create(unique_tag) + + yield tag + + tag.delete() + + +@pytest.mark.smoke +def test_get_tag(test_linode_client, test_tag): + tag = test_linode_client.load(Tag, test_tag.id) + + assert tag.id == 
test_tag.id diff --git a/test/integration/models/volume/test_blockstorage.py b/test/integration/models/volume/test_blockstorage.py new file mode 100644 index 000000000..8dac88e18 --- /dev/null +++ b/test/integration/models/volume/test_blockstorage.py @@ -0,0 +1,40 @@ +from test.integration.conftest import get_region +from test.integration.helpers import get_test_label, retry_sending_request + + +def test_config_create_with_extended_volume_limit(test_linode_client): + client = test_linode_client + + region = get_region(client, {"Linodes", "Block Storage"}, site_type="core") + label = get_test_label() + + linode, _ = client.linode.instance_create( + "g6-standard-6", + region, + image="linode/debian12", + label=label, + ) + + volumes = [ + client.volume_create( + f"{label}-vol-{i}", + region=region, + size=10, + ) + for i in range(12) + ] + + config = linode.config_create(volumes=volumes) + + devices = config._raw_json["devices"] + + assert len([d for d in devices.values() if d is not None]) == 12 + + assert "sdi" in devices + assert "sdj" in devices + assert "sdk" in devices + assert "sdl" in devices + + linode.delete() + for v in volumes: + retry_sending_request(3, v.delete) diff --git a/test/integration/models/volume/test_volume.py b/test/integration/models/volume/test_volume.py new file mode 100644 index 000000000..56395d203 --- /dev/null +++ b/test/integration/models/volume/test_volume.py @@ -0,0 +1,169 @@ +import time +from test.integration.conftest import ( + get_api_ca_file, + get_api_url, + get_region, + get_token, +) +from test.integration.helpers import ( + get_test_label, + retry_sending_request, + send_request_when_resource_available, + wait_for_condition, +) + +import pytest + +from linode_api4 import LinodeClient +from linode_api4.objects import RegionPrice, Volume, VolumeType + +TEST_REGION = get_region( + LinodeClient( + token=get_token(), + base_url=get_api_url(), + ca_path=get_api_ca_file(), + ), + {"Linodes", "Cloud Firewall"}, + site_type="core", 
+) + + +@pytest.fixture(scope="session") +def test_volume(test_linode_client): + client = test_linode_client + label = get_test_label(length=8) + + volume = client.volume_create(label=label, region=TEST_REGION) + + yield volume + + send_request_when_resource_available(timeout=100, func=volume.delete) + + +@pytest.fixture(scope="session") +def linode_for_volume(test_linode_client, e2e_test_firewall): + client = test_linode_client + + label = get_test_label(length=8) + + linode_instance, password = client.linode.instance_create( + "g6-nanode-1", + TEST_REGION, + image="linode/debian12", + label=label, + firewall=e2e_test_firewall, + ) + + yield linode_instance + + send_request_when_resource_available( + timeout=100, func=linode_instance.delete + ) + + +def get_status(volume: Volume, status: str): + client = LinodeClient( + token=get_token(), + base_url=get_api_url(), + ca_path=get_api_ca_file(), + ) + volume = client.load(Volume, volume.id) + return volume.status == status + + +@pytest.mark.smoke +def test_get_volume(test_linode_client, test_volume): + volume = test_linode_client.load(Volume, test_volume.id) + + assert volume.id == test_volume.id + + +def test_get_volume_with_encryption( + test_linode_client, test_volume_with_encryption +): + volume = test_linode_client.load(Volume, test_volume_with_encryption.id) + + assert volume.id == test_volume_with_encryption.id + assert volume.encryption == "enabled" + + +def test_update_volume_tag(test_linode_client, test_volume): + volume = test_volume + tag_1 = get_test_label(10) + tag_2 = get_test_label(10) + + volume.tags = [tag_1, tag_2] + volume.save() + + volume = test_linode_client.load(Volume, test_volume.id) + + assert all(tag in volume.tags for tag in [tag_1, tag_2]) + + +def test_volume_resize(test_linode_client, test_volume): + volume = test_linode_client.load(Volume, test_volume.id) + + wait_for_condition(10, 100, get_status, volume, "active") + + res = retry_sending_request(5, volume.resize, 21) + + assert res 
+ + +def test_volume_clone_and_delete(test_linode_client, test_volume): + volume = test_linode_client.load(Volume, test_volume.id) + label = get_test_label() + + wait_for_condition(10, 100, get_status, volume, "active") + + new_volume = retry_sending_request(5, volume.clone, label) + + assert label == new_volume.label + + res = retry_sending_request(5, new_volume.delete) + + assert res, "new volume deletion failed" + + +def test_attach_volume_to_linode( + test_linode_client, test_volume, linode_for_volume +): + volume = test_volume + linode = linode_for_volume + + res = retry_sending_request(5, volume.attach, linode.id, backoff=30) + + assert res + + +def test_detach_volume_to_linode( + test_linode_client, test_volume, linode_for_volume +): + volume = test_volume + linode = linode_for_volume + + res = retry_sending_request(5, volume.detach) + + assert res + + # time wait for volume to detach before deletion occurs + time.sleep(30) + + +def test_volume_types(test_linode_client): + types = test_linode_client.volumes.types() + + if len(types) > 0: + for volume_type in types: + assert type(volume_type) is VolumeType + assert volume_type.price.monthly is None or ( + isinstance(volume_type.price.monthly, (float, int)) + and volume_type.price.monthly >= 0 + ) + if len(volume_type.region_prices) > 0: + region_price = volume_type.region_prices[0] + assert type(region_price) is RegionPrice + assert region_price.monthly is None or ( + isinstance(region_price.monthly, (float, int)) + and region_price.monthly >= 0 + ) diff --git a/test/integration/models/vpc/test_vpc.py b/test/integration/models/vpc/test_vpc.py new file mode 100644 index 000000000..85d32d858 --- /dev/null +++ b/test/integration/models/vpc/test_vpc.py @@ -0,0 +1,140 @@ +from test.integration.conftest import get_region + +import pytest + +from linode_api4 import VPC, ApiError, VPCSubnet + + +@pytest.mark.smoke +def test_get_vpc(test_linode_client, create_vpc): + vpc = test_linode_client.load(VPC, create_vpc.id) + 
test_linode_client.vpcs() + assert vpc.id == create_vpc.id + assert isinstance(vpc.ipv6[0].range, str) + + +@pytest.mark.smoke +def test_update_vpc(test_linode_client, create_vpc): + vpc = create_vpc + new_label = create_vpc.label + "-updated" + new_desc = "updated description" + + vpc.label = new_label + vpc.description = new_desc + vpc.save() + + vpc = test_linode_client.load(VPC, create_vpc.id) + + assert vpc.label == new_label + assert vpc.description == new_desc + + +def test_get_subnet(test_linode_client, create_vpc_with_subnet): + vpc, subnet = create_vpc_with_subnet + loaded_subnet = test_linode_client.load(VPCSubnet, subnet.id, vpc.id) + assert loaded_subnet.ipv4 == subnet.ipv4 + assert loaded_subnet.ipv6 is not None + assert loaded_subnet.ipv6[0].range.startswith( + vpc.ipv6[0].range.split("::")[0] + ) + assert loaded_subnet.id == subnet.id + + +@pytest.mark.smoke +def test_update_subnet(test_linode_client, create_vpc_with_subnet): + vpc, subnet = create_vpc_with_subnet + new_label = subnet.label + "-updated" + + subnet.label = new_label + subnet.save() + + subnet = test_linode_client.load(VPCSubnet, subnet.id, vpc.id) + + assert subnet.label == new_label + + +def test_fails_create_vpc_invalid_data(test_linode_client): + with pytest.raises(ApiError) as excinfo: + test_linode_client.vpcs.create( + label="invalid_label!!", + region=get_region(test_linode_client, {"VPCs"}), + description="test description", + ) + assert excinfo.value.status == 400 + + +def test_get_all_vpcs(test_linode_client, create_multiple_vpcs): + vpc_1, vpc_2 = create_multiple_vpcs + + all_vpcs = test_linode_client.vpcs() + + assert str(vpc_1) in str(all_vpcs.lists) + assert str(vpc_2) in str(all_vpcs.lists) + + +def test_fails_update_vpc_invalid_data(create_vpc): + vpc = create_vpc + + invalid_label = "invalid!!" 
+ vpc.label = invalid_label + + with pytest.raises(ApiError) as excinfo: + vpc.save() + + assert excinfo.value.status == 400 + + +def test_fails_create_subnet_invalid_data(create_vpc): + invalid_ipv4 = "10.0.0.0" + + with pytest.raises(ApiError) as excinfo: + create_vpc.subnet_create("test-subnet", ipv4=invalid_ipv4) + + assert excinfo.value.status == 400 + error_msg = str(excinfo.value.json) + + assert "Must be an IPv4 network" in error_msg + + +def test_fails_update_subnet_invalid_data(create_vpc_with_subnet): + invalid_label = "invalid_subnet_label!!" + vpc, subnet = create_vpc_with_subnet + subnet.label = invalid_label + + with pytest.raises(ApiError) as excinfo: + subnet.save() + + assert excinfo.value.status == 400 + assert "Must only use ASCII" in str(excinfo.value.json) + + +def test_fails_create_subnet_with_invalid_ipv6_range(create_vpc): + valid_ipv4 = "10.0.0.0/24" + invalid_ipv6 = [{"range": "2600:3c11:e5b9::/5a"}] + + with pytest.raises(ApiError) as excinfo: + create_vpc.subnet_create( + label="bad-ipv6-subnet", + ipv4=valid_ipv4, + ipv6=invalid_ipv6, + ) + + assert excinfo.value.status == 400 + error = excinfo.value.json["errors"] + + assert any( + e["field"] == "ipv6[0].range" + and "Must be an IPv6 network" in e["reason"] + for e in error + ) + + +def test_get_vpc_ipv6s(test_linode_client): + ipv6s = test_linode_client.get("/vpcs/ipv6s")["data"] + + assert isinstance(ipv6s, list) + + for ipv6 in ipv6s: + assert "vpc_id" in ipv6 + assert isinstance(ipv6["ipv6_range"], str) + assert isinstance(ipv6["ipv6_addresses"], list) diff --git a/test/unit/__init__.py b/test/unit/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/unit/base.py b/test/unit/base.py new file mode 100644 index 000000000..bc0ec2f08 --- /dev/null +++ b/test/unit/base.py @@ -0,0 +1,230 @@ +import json +from test.unit.fixtures import TestFixtures +from unittest import TestCase + +from mock import patch + +from linode_api4 import LinodeClient, MonitorClient + 
+FIXTURES = TestFixtures() + + +class MockResponse: + def __init__(self, status_code, json, headers={}): + self.status_code = status_code + self._json = json + # Headers is a dict, do not want to use a getter here + self.headers = headers + + def json(self): + return self._json + + +def load_json(url): + """ + Returns a dict from a .json file living in /test/response/GET/{url}.json + + :param url: The URL being accessed whose JSON is to be returned + + :returns: A dict containing the loaded JSON from that file + """ + formatted_url = url + + while formatted_url.startswith("/"): + formatted_url = formatted_url[1:] + + return FIXTURES.get_fixture(formatted_url) + + +def mock_get(url, headers=None, data=None, **kwargs): + """ + Loads the response from a JSON file + """ + response = load_json(url) + + return MockResponse(200, response) + + +class MethodMock: + """ + This class is used to mock methods on requests and store the parameters + and headers it was called with. + """ + + def __init__(self, method, return_dct): + """ + Creates and initiates a new MethodMock with the given details + + :param method: The HTTP method we are mocking + :param return_dct: The python dct to returned, or the URL for a JSON + file to return + """ + self.method = method + if isinstance(return_dct, dict): + self.return_dct = return_dct + elif isinstance(return_dct, str): + self.return_dct = load_json(return_dct) + else: + raise TypeError( + "return_dct must be a dict or a URL from which the " + "JSON could be loaded" + ) + + def __enter__(self): + """ + Begins the method mocking + """ + self.patch = patch( + "linode_api4.linode_client.requests.Session." 
+ self.method, + return_value=MockResponse(200, self.return_dct), + ) + self.mock = self.patch.start() + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ + Removed the mocked method + """ + self.patch.stop() + + @property + def call_args(self): + """ + A shortcut to accessing the underlying mock object's call args + """ + return self.mock.call_args + + @property + def call_data_raw(self): + """ + A shortcut to access the raw call data, not parsed as JSON + """ + return self.mock.call_args[1]["data"] + + @property + def call_url(self): + """ + A shortcut to accessing the URL called on the underlying mock. We + chop off the first character because our testing base_url has a leading + / we don't want to see. + """ + return self.mock.call_args[0][0][1:] + + @property + def call_data(self): + """ + A shortcut to getting the data param this was called with. Removes all + keys whose values are None + """ + data = json.loads(self.mock.call_args[1]["data"]) + + return {k: v for k, v in data.items() if v is not None} + + @property + def call_headers(self): + """ + A shortcut to getting the headers param this was called with + """ + return self.mock.call_args[1]["headers"] + + @property + def called(self): + """ + A shortcut to check whether the mock function was called. + """ + return self.mock.called + + @property + def call_count(self): + """ + A shortcut to check how many times the mock function was called. + """ + return self.mock.call_count + + +class ClientBaseCase(TestCase): + def setUp(self): + self.client = LinodeClient("testing", base_url="/") + + self.get_patch = patch( + "linode_api4.linode_client.requests.Session.get", + side_effect=mock_get, + ) + self.get_patch.start() + + def tearDown(self): + self.get_patch.stop() + + def mock_get(self, return_dct): + """ + Returns a MethodMock mocking a GET. This should be used in a with + statement. 
+ + :param return_dct: The JSON that should be returned from this GET + + :returns: A MethodMock object who will capture the parameters of the + mocked requests + """ + return MethodMock("get", return_dct) + + def mock_post(self, return_dct): + """ + Returns a MethodMock mocking a POST. This should be used in a with + statement. + + :param return_dct: The JSON that should be returned from this POST + + :returns: A MethodMock object who will capture the parameters of the + mocked requests + """ + return MethodMock("post", return_dct) + + def mock_put(self, return_dct): + """ + Returns a MethodMock mocking a PUT. This should be used in a with + statement. + + :param return_dct: The JSON that should be returned from this PUT + + :returns: A MethodMock object who will capture the parameters of the + mocked requests + """ + return MethodMock("put", return_dct) + + def mock_delete(self): + """ + Returns a MethodMock mocking a DELETE. This should be used in a with + statement. + + :param return_dct: The JSON that should be returned from this DELETE + + :returns: A MethodMock object who will capture the parameters of the + mocked requests + """ + return MethodMock("delete", {}) + + +class MonitorClientBaseCase(TestCase): + def setUp(self): + self.client = MonitorClient("testing", base_url="/") + + self.get_patch = patch( + "linode_api4.linode_client.requests.Session.get", + side_effect=mock_get, + ) + self.get_patch.start() + + def tearDown(self): + self.get_patch.stop() + + def mock_post(self, return_dct): + """ + Returns a MethodMock mocking a POST. This should be used in a with + statement. 
+ + :param return_dct: The JSON that should be returned from this POST + + :returns: A MethodMock object who will capture the parameters of the + mocked requests + """ + return MethodMock("post", return_dct) diff --git a/test/unit/errors_test.py b/test/unit/errors_test.py new file mode 100644 index 000000000..017c96280 --- /dev/null +++ b/test/unit/errors_test.py @@ -0,0 +1,104 @@ +from types import SimpleNamespace +from unittest import TestCase + +from linode_api4.errors import ApiError, UnexpectedResponseError + + +class ApiErrorTest(TestCase): + def test_from_response(self): + mock_response = SimpleNamespace( + status_code=400, + json=lambda: { + "errors": [ + {"reason": "foo"}, + {"field": "bar", "reason": "oh no"}, + ] + }, + text='{"errors": [{"reason": "foo"}, {"field": "bar", "reason": "oh no"}]}', + request=SimpleNamespace( + method="POST", + path_url="foo/bar", + ), + ) + + exc = ApiError.from_response(mock_response) + + assert str(exc) == "POST foo/bar: [400] foo; bar: oh no" + assert exc.status == 400 + assert exc.json == { + "errors": [{"reason": "foo"}, {"field": "bar", "reason": "oh no"}] + } + assert exc.response.request.method == "POST" + assert exc.response.request.path_url == "foo/bar" + + def test_from_response_non_json_body(self): + mock_response = SimpleNamespace( + status_code=500, + json=lambda: None, + text="foobar", + request=SimpleNamespace( + method="POST", + path_url="foo/bar", + ), + ) + + exc = ApiError.from_response(mock_response) + + assert str(exc) == "POST foo/bar: [500] foobar" + assert exc.status == 500 + assert exc.json is None + assert exc.response.request.method == "POST" + assert exc.response.request.path_url == "foo/bar" + + def test_from_response_empty_body(self): + mock_response = SimpleNamespace( + status_code=500, + json=lambda: None, + text=None, + request=SimpleNamespace( + method="POST", + path_url="foo/bar", + ), + ) + + exc = ApiError.from_response(mock_response) + + assert str(exc) == "POST foo/bar: [500] N/A" + 
assert exc.status == 500 + assert exc.json is None + assert exc.response.request.method == "POST" + assert exc.response.request.path_url == "foo/bar" + + def test_from_response_no_request(self): + mock_response = SimpleNamespace( + status_code=500, json=lambda: None, text="foobar", request=None + ) + + exc = ApiError.from_response(mock_response) + + assert str(exc) == "[500] foobar" + assert exc.status == 500 + assert exc.json is None + assert exc.response.request is None + + +class UnexpectedResponseErrorTest(TestCase): + def test_from_response(self): + mock_response = SimpleNamespace( + status_code=400, + json=lambda: { + "foo": "bar", + }, + request=SimpleNamespace( + method="POST", + path_url="foo/bar", + ), + ) + + exc = UnexpectedResponseError.from_response("foobar", mock_response) + + assert str(exc) == "foobar" + assert exc.status == 400 + assert exc.json == {"foo": "bar"} + assert exc.response.request.method == "POST" + assert exc.response.request.path_url == "foo/bar" diff --git a/test/unit/fixtures.py b/test/unit/fixtures.py new file mode 100644 index 000000000..c943da95c --- /dev/null +++ b/test/unit/fixtures.py @@ -0,0 +1,51 @@ +import json +import re +from pathlib import Path + +FIXTURES_DIR = Path(__file__).parent.parent / "fixtures" + +# This regex is useful for finding individual underscore characters, +# which is necessary to allow us to use underscores in URL paths. +PATH_REPLACEMENT_REGEX = re.compile(r"(? 
0 + + for entry in avail_entries: + assert entry.region is not None + assert len(entry.region) > 0 + assert len(entry.plan) > 0 + assert entry.available is not None + + # Ensure all three pages are read + assert m.call_count == 3 + assert m.mock.call_args_list[0].args[0] == "//regions/availability" + + assert ( + m.mock.call_args_list[1].args[0] + == "//regions/availability?page=2&page_size=100" + ) + assert ( + m.mock.call_args_list[2].args[0] + == "//regions/availability?page=3&page_size=100" + ) + + # Ensure the filter headers are correct + for k, call in m.mock.call_args_list: + assert json.loads(call.get("headers").get("X-Filter")) == { + "+and": [{"region": "us-east"}, {"plan": "premium4096.7"}] + } + + def test_list_vpc_availability(self): + """ + Tests that region VPC availability can be listed. + """ + + with self.mock_get("/regions/vpc-availability") as m: + vpc_entries = self.client.regions.vpc_availability() + + assert len(vpc_entries) > 0 + + for entry in vpc_entries: + assert len(entry.region) > 0 + assert entry.available is not None + # available_ipv6_prefix_lengths may be empty list but should exist + assert entry.available_ipv6_prefix_lengths is not None + + # Ensure both pages are read + assert m.call_count == 2 + assert ( + m.mock.call_args_list[0].args[0] == "//regions/vpc-availability" + ) + + assert ( + m.mock.call_args_list[1].args[0] + == "//regions/vpc-availability?page=2&page_size=25" + ) diff --git a/test/unit/groups/vpc_test.py b/test/unit/groups/vpc_test.py new file mode 100644 index 000000000..7b8c985d2 --- /dev/null +++ b/test/unit/groups/vpc_test.py @@ -0,0 +1,107 @@ +import datetime +from test.unit.base import ClientBaseCase + +from linode_api4 import DATE_FORMAT, VPC, VPCSubnet + + +class VPCTest(ClientBaseCase): + """ + Tests methods of the VPC Group + """ + + def test_create_vpc(self): + """ + Tests that you can create a VPC. 
+ """ + + with self.mock_post("/vpcs/123456") as m: + vpc = self.client.vpcs.create("test-vpc", "us-southeast") + + self.assertEqual(m.call_url, "/vpcs") + + self.assertEqual( + m.call_data, + { + "label": "test-vpc", + "region": "us-southeast", + }, + ) + + self.assertEqual(vpc._populated, True) + self.validate_vpc_123456(vpc) + + def test_create_vpc_with_subnet(self): + """ + Tests that you can create a VPC. + """ + + with self.mock_post("/vpcs/123456") as m: + vpc = self.client.vpcs.create( + "test-vpc", + "us-southeast", + subnets=[{"label": "test-subnet", "ipv4": "10.0.0.0/24"}], + ) + + self.assertEqual(m.call_url, "/vpcs") + + self.assertEqual( + m.call_data, + { + "label": "test-vpc", + "region": "us-southeast", + "subnets": [ + {"label": "test-subnet", "ipv4": "10.0.0.0/24"} + ], + }, + ) + + self.assertEqual(vpc._populated, True) + self.validate_vpc_123456(vpc) + + def test_list_ips(self): + """ + Validates that all VPC IPs can be listed. + """ + + with self.mock_get("/vpcs/ips") as m: + result = self.client.vpcs.ips() + + assert m.call_url == "/vpcs/ips" + assert len(result) == 1 + + ip = result[0] + assert ip.address == "10.0.0.2" + assert ip.address_range is None + assert ip.vpc_id == 123 + assert ip.subnet_id == 456 + assert ip.region == "us-mia" + assert ip.linode_id == 123 + assert ip.config_id == 456 + assert ip.interface_id == 789 + assert ip.active + assert ip.nat_1_1 == "172.233.179.133" + assert ip.gateway == "10.0.0.1" + assert ip.prefix == 24 + assert ip.subnet_mask == "255.255.255.0" + + def validate_vpc_123456(self, vpc: VPC): + expected_dt = datetime.datetime.strptime( + "2018-01-01T00:01:01", DATE_FORMAT + ) + + self.assertEqual(vpc.label, "test-vpc") + self.assertEqual(vpc.description, "A very real VPC.") + self.assertEqual(vpc.region.id, "us-southeast") + self.assertEqual(vpc.created, expected_dt) + self.assertEqual(vpc.updated, expected_dt) + + def validate_vpc_subnet_789(self, subnet: VPCSubnet): + expected_dt = 
datetime.datetime.strptime( + "2018-01-01T00:01:01", DATE_FORMAT + ) + + self.assertEqual(subnet.label, "test-subnet") + self.assertEqual(subnet.ipv4, "10.0.0.0/24") + self.assertEqual(subnet.linodes[0].id, 12345) + self.assertEqual(subnet.created, expected_dt) + self.assertEqual(subnet.updated, expected_dt) diff --git a/test/unit/linode_client_test.py b/test/unit/linode_client_test.py new file mode 100644 index 000000000..e82f3562d --- /dev/null +++ b/test/unit/linode_client_test.py @@ -0,0 +1,1480 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4 import FirewallCreateDevicesOptions, LongviewSubscription +from linode_api4.objects.beta import BetaProgram +from linode_api4.objects.linode import Instance +from linode_api4.objects.networking import IPAddress +from linode_api4.objects.object_storage import ( + ObjectStorageACL, + ObjectStorageCluster, +) + + +class LinodeClientGeneralTest(ClientBaseCase): + """ + Tests methods of the LinodeClient class that do not live inside of a group. 
+ """ + + def test_get_no_empty_body(self): + """ + Tests that a valid JSON body is passed for a GET call + """ + with self.mock_get("linode/instances") as m: + self.client.regions() + + self.assertEqual(m.call_data_raw, None) + + def test_get_account(self): + a = self.client.account() + self.assertEqual(a._populated, True) + + self.assertEqual(a.first_name, "Test") + self.assertEqual(a.last_name, "Guy") + self.assertEqual(a.email, "support@linode.com") + self.assertEqual(a.phone, "123-456-7890") + self.assertEqual(a.company, "Linode") + self.assertEqual(a.address_1, "3rd & Arch St") + self.assertEqual(a.address_2, "") + self.assertEqual(a.city, "Philadelphia") + self.assertEqual(a.state, "PA") + self.assertEqual(a.country, "US") + self.assertEqual(a.zip, "19106") + self.assertEqual(a.tax_id, "") + self.assertEqual(a.balance, 0) + self.assertEqual( + a.capabilities, + [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage", + "Linode Interfaces", + ], + ) + + def test_get_regions(self): + r = self.client.regions() + + self.assertEqual(len(r), 11) + for region in r: + self.assertTrue(region._populated) + self.assertIsNotNone(region.id) + self.assertIsNotNone(region.country) + if region.id in ("us-east", "eu-central", "ap-south"): + self.assertEqual( + region.capabilities, + [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Object Storage", + "Linode Interfaces", + ], + ) + else: + self.assertEqual( + region.capabilities, + [ + "Linodes", + "NodeBalancers", + "Block Storage", + "Linode Interfaces", + ], + ) + self.assertEqual(region.status, "ok") + self.assertIsNotNone(region.resolvers) + self.assertIsNotNone(region.resolvers.ipv4) + self.assertIsNotNone(region.resolvers.ipv6) + self.assertEqual(region.site_type, "core") + + def test_get_images(self): + r = self.client.images() + + self.assertEqual(len(r), 4) + for image in r: + self.assertTrue(image._populated) + self.assertIsNotNone(image.id) + + def test_get_domains(self): + """ + Tests that 
domains can be retrieved and are marshalled properly + """ + r = self.client.domains() + + self.assertEqual(len(r), 1) + domain = r.first() + + self.assertEqual(domain.domain, "example.org") + self.assertEqual(domain.type, "master") + self.assertEqual(domain.id, 12345) + self.assertEqual(domain.axfr_ips, []) + self.assertEqual(domain.retry_sec, 0) + self.assertEqual(domain.ttl_sec, 300) + self.assertEqual(domain.status, "active") + self.assertEqual( + domain.master_ips, + [], + ) + self.assertEqual( + domain.description, + "", + ) + self.assertEqual( + domain.group, + "", + ) + self.assertEqual( + domain.expire_sec, + 0, + ) + self.assertEqual( + domain.soa_email, + "test@example.org", + ) + self.assertEqual(domain.refresh_sec, 0) + + def test_image_create(self): + """ + Tests that an Image can be created successfully + """ + with self.mock_post("images/private/123") as m: + i = self.client.image_create( + 654, "Test-Image", "This is a test", ["test"] + ) + + self.assertIsNotNone(i) + self.assertEqual(i.id, "private/123") + self.assertEqual(i.capabilities[0], "cloud-init") + + self.assertEqual(m.call_url, "/images") + + self.assertEqual( + m.call_data, + { + "disk_id": 654, + "label": "Test-Image", + "description": "This is a test", + "tags": ["test"], + }, + ) + + def test_get_volumes(self): + v = self.client.volumes() + + self.assertEqual(len(v), 4) + self.assertEqual(v[0].label, "block1") + self.assertEqual(v[0].region.id, "us-east-1a") + self.assertEqual(v[1].label, "block2") + self.assertEqual(v[1].size, 100) + self.assertEqual(v[2].size, 200) + self.assertEqual(v[2].label, "block3") + self.assertEqual(v[0].filesystem_path, "this/is/a/file/path") + self.assertEqual(v[0].hardware_type, "hdd") + self.assertEqual(v[1].filesystem_path, "this/is/a/file/path") + self.assertEqual(v[1].linode_label, None) + self.assertEqual(v[2].filesystem_path, "this/is/a/file/path") + self.assertEqual(v[2].hardware_type, "nvme") + + assert v[0].tags == ["something"] + assert 
v[1].tags == [] + assert v[2].tags == ["attached"] + + def test_get_tags(self): + """ + Tests that a list of Tags can be retrieved as expected + """ + t = self.client.tags() + + self.assertEqual(len(t), 2) + self.assertEqual(t[0].label, "nothing") + self.assertEqual(t[1].label, "something") + + def test_tag_create(self): + """ + Tests that creating a tag works as expected + """ + # tags don't work like a normal RESTful collection, so we have to do this + with self.mock_post({"label": "nothing"}) as m: + t = self.client.tag_create("nothing") + + self.assertIsNotNone(t) + self.assertEqual(t.label, "nothing") + + self.assertEqual(m.call_url, "/tags") + self.assertEqual( + m.call_data, + { + "label": "nothing", + }, + ) + + def test_tag_create_with_ids(self): + """ + Tests that creating a tag with IDs sends the correct request + """ + instance1, instance2 = self.client.linode.instances()[:2] + domain1 = self.client.domains().first() + nodebalancer1, nodebalancer2 = self.client.nodebalancers()[:2] + volume1, volume2 = self.client.volumes()[:2] + + # tags don't work like a normal RESTful collection, so we have to do this + with self.mock_post({"label": "pytest"}) as m: + t = self.client.tag_create( + "pytest", + instances=[instance1.id, instance2], + nodebalancers=[nodebalancer1.id, nodebalancer2], + domains=[domain1.id], + volumes=[volume1.id, volume2], + ) + + self.assertIsNotNone(t) + self.assertEqual(t.label, "pytest") + + self.assertEqual(m.call_url, "/tags") + self.assertEqual( + m.call_data, + { + "label": "pytest", + "linodes": [instance1.id, instance2.id], + "domains": [domain1.id], + "nodebalancers": [nodebalancer1.id, nodebalancer2.id], + "volumes": [volume1.id, volume2.id], + }, + ) + + def test_tag_create_with_entities(self): + """ + Tests that creating a tag with entities sends the correct request + """ + instance1, instance2 = self.client.linode.instances()[:2] + domain = self.client.domains().first() + nodebalancer = self.client.nodebalancers().first() + 
volume = self.client.volumes().first() + + # tags don't work like a normal RESTful collection, so we have to do this + with self.mock_post({"label": "pytest"}) as m: + t = self.client.tag_create( + "pytest", + entities=[instance1, domain, nodebalancer, volume, instance2], + ) + + self.assertIsNotNone(t) + self.assertEqual(t.label, "pytest") + + self.assertEqual(m.call_url, "/tags") + self.assertEqual( + m.call_data, + { + "label": "pytest", + "linodes": [instance1.id, instance2.id], + "domains": [domain.id], + "nodebalancers": [nodebalancer.id], + "volumes": [volume.id], + }, + ) + + def test_override_ca(self): + """ + Tests that the CA file used for API requests can be overridden. + """ + self.client.ca_path = "foobar" + + called = False + + old_get = self.client.session.get + + def get_mock(*params, verify=True, **kwargs): + nonlocal called + called = True + assert verify == "foobar" + return old_get(*params, **kwargs) + + self.client.session.get = get_mock + + self.client.linode.instances() + + assert called + + def test_custom_verify(self): + """ + If we set a custom `verify` value on our session, + we want it preserved. 
+ """ + called = False + + self.client.session.verify = False + old_get = self.client.session.get + + def get_mock(*params, verify=True, **kwargs): + nonlocal called + called = True + assert verify is False + return old_get(*params, **kwargs) + + self.client.session.get = get_mock + + self.client.linode.instances() + + assert called + + +class MaintenanceGroupTest(ClientBaseCase): + """ + Tests methods of the MaintenanceGroup + """ + + def test_maintenance(self): + """ + Tests that maintenance can be retrieved + Tests that maintenance can be retrieved + """ + with self.mock_get("/maintenance/policies") as m: + result = self.client.maintenance.maintenance_policies() + + self.assertEqual(m.call_url, "/maintenance/policies") + self.assertEqual(len(result), 3) + + policy_migrate = result[0] + policy_power_off_on = result[1] + policy_custom = result[2] + + self.assertEqual(policy_migrate.slug, "linode/migrate") + self.assertEqual(policy_migrate.label, "Migrate") + self.assertEqual( + policy_migrate.description, + "Migrates the Linode to a new host while it remains fully operational. Recommended for maximizing availability.", + ) + self.assertEqual(policy_migrate.type, "migrate") + self.assertEqual(policy_migrate.notification_period_sec, 3600) + self.assertTrue(policy_migrate.is_default) + + self.assertEqual(policy_power_off_on.slug, "linode/power_off_on") + self.assertEqual(policy_power_off_on.label, "Power Off/Power On") + self.assertEqual( + policy_power_off_on.description, + "Powers off the Linode at the start of the maintenance event and reboots it once the maintenance finishes. 
Recommended for maximizing performance.", + ) + self.assertEqual(policy_power_off_on.type, "power_off_on") + self.assertEqual(policy_power_off_on.notification_period_sec, 1800) + self.assertFalse(policy_power_off_on.is_default) + + self.assertEqual(policy_custom.slug, "private/12345") + self.assertEqual( + policy_custom.label, "Critical Workload - Avoid Migration" + ) + self.assertEqual( + policy_custom.description, + "Custom policy designed to power off and perform maintenance during user-defined windows only.", + ) + self.assertEqual(policy_custom.type, "power_off_on") + self.assertEqual(policy_custom.notification_period_sec, 7200) + self.assertFalse(policy_custom.is_default) + + +class AccountGroupTest(ClientBaseCase): + """ + Tests methods of the AccountGroup + """ + + def test_get_settings(self): + """ + Tests that account settings can be retrieved. + """ + s = self.client.account.settings() + self.assertEqual(s._populated, True) + + self.assertEqual(s.network_helper, False) + self.assertEqual(s.managed, False) + self.assertEqual(type(s.longview_subscription), LongviewSubscription) + self.assertEqual(s.longview_subscription.id, "longview-100") + self.assertEqual(s.object_storage, "active") + + def test_get_invoices(self): + """ + Tests that invoices can be retrieved + """ + i = self.client.account.invoices() + + self.assertEqual(len(i), 1) + invoice = i[0] + + self.assertEqual(invoice.id, 123456) + self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2)) + self.assertEqual(invoice.label, "Invoice #123456") + self.assertEqual(invoice.total, 9.51) + + def test_logins(self): + """ + Tests that logins can be retrieved + """ + logins = self.client.account.logins() + self.assertEqual(len(logins), 1) + self.assertEqual(logins[0].id, 1234) + + def test_maintenance(self): + """ + Tests that maintenance can be retrieved + """ + with self.mock_get("/account/maintenance") as m: + result = self.client.account.maintenance() + + self.assertEqual(m.call_url, 
"/account/maintenance") + self.assertEqual(len(result), 2) + + maintenance_1 = result[0] + maintenance_2 = result[1] + + # First maintenance + self.assertEqual( + maintenance_1.reason, + "Scheduled upgrade to faster NVMe hardware.", + ) + self.assertEqual(maintenance_1.entity.id, 1234) + self.assertEqual(maintenance_1.entity.label, "Linode #1234") + self.assertEqual(maintenance_1.entity.type, "linode") + self.assertEqual(maintenance_1.entity.url, "/linodes/1234") + self.assertEqual( + maintenance_1.maintenance_policy_set, "linode/power_off_on" + ) + self.assertEqual(maintenance_1.description, "Scheduled Maintenance") + self.assertEqual(maintenance_1.source, "platform") + self.assertEqual(maintenance_1.not_before, "2025-03-25T10:00:00Z") + self.assertEqual(maintenance_1.start_time, "2025-03-25T12:00:00Z") + self.assertEqual( + maintenance_1.complete_time, "2025-03-25T14:00:00Z" + ) + self.assertEqual(maintenance_1.status, "scheduled") + self.assertEqual(maintenance_1.type, "linode_migrate") + + # Second maintenance + self.assertEqual( + maintenance_2.reason, + "Pending migration of Linode #1234 to a new host.", + ) + self.assertEqual(maintenance_2.entity.id, 1234) + self.assertEqual(maintenance_2.entity.label, "Linode #1234") + self.assertEqual(maintenance_2.entity.type, "linode") + self.assertEqual(maintenance_2.entity.url, "/linodes/1234") + self.assertEqual( + maintenance_2.maintenance_policy_set, "linode/migrate" + ) + self.assertEqual(maintenance_2.description, "Emergency Maintenance") + self.assertEqual(maintenance_2.source, "user") + self.assertEqual(maintenance_2.not_before, "2025-03-26T15:00:00Z") + self.assertEqual(maintenance_2.start_time, "2025-03-26T15:00:00Z") + self.assertEqual( + maintenance_2.complete_time, "2025-03-26T17:00:00Z" + ) + self.assertEqual(maintenance_2.status, "in-progress") + self.assertEqual(maintenance_2.type, "linode_migrate") + + def test_notifications(self): + """ + Tests that notifications can be retrieved + """ + with 
self.mock_get("/account/notifications") as m: + result = self.client.account.notifications() + self.assertEqual(m.call_url, "/account/notifications") + self.assertEqual(len(result), 1) + self.assertEqual( + result[0].label, "You have an important ticket open!" + ) + + def test_payment_methods(self): + """ + Tests that payment methods can be retrieved + """ + paymentMethods = self.client.account.payment_methods() + self.assertEqual(len(paymentMethods), 1) + self.assertEqual(paymentMethods[0].id, 123) + + def test_add_payment_method(self): + """ + Tests that adding a payment method creates the correct api request. + """ + with self.mock_post({}) as m: + self.client.account.add_payment_method( + { + "card_number": "123456789100", + "expiry_month": 1, + "expiry_year": 2028, + "cvv": 111, + }, + True, + "credit_card", + ) + self.assertEqual(m.call_url, "/account/payment-methods") + self.assertEqual(m.call_data["type"], "credit_card") + self.assertTrue(m.call_data["is_default"]) + self.assertIsNotNone(m.call_data["data"]) + + def test_add_promo_code(self): + """ + Tests that adding a promo code creates the correct api request. + """ + with self.mock_post("/account/promo-codes") as m: + self.client.account.add_promo_code("123promo456") + self.assertEqual(m.call_url, "/account/promo-codes") + self.assertEqual(m.call_data["promo_code"], "123promo456") + + def test_service_transfers(self): + """ + Tests that service transfers can be retrieved + """ + serviceTransfers = self.client.account.service_transfers() + self.assertEqual(len(serviceTransfers), 1) + self.assertEqual( + serviceTransfers[0].token, "123E4567-E89B-12D3-A456-426614174000" + ) + + def test_linode_managed_enable(self): + """ + Tests that enabling linode managed creates the correct api request. 
+ """ + with self.mock_post({}) as m: + self.client.account.linode_managed_enable() + self.assertEqual(m.call_url, "/account/settings/managed-enable") + + def test_service_transfer_create(self): + """ + Tests that creating a service transfer creates the correct api request. + """ + data = {"linodes": [111, 222]} + response = { + "created": "2021-02-11T16:37:03", + "entities": {"linodes": [111, 222]}, + "expiry": "2021-02-12T16:37:03", + "is_sender": True, + "status": "pending", + "token": "123E4567-E89B-12D3-A456-426614174000", + "updated": "2021-02-11T16:37:03", + } + + with self.mock_post(response) as m: + self.client.account.service_transfer_create(data) + self.assertEqual(m.call_url, "/account/service-transfers") + self.assertEqual(m.call_data["entities"], data) + + def test_payments(self): + """ + Tests that payments can be retrieved + """ + p = self.client.account.payments() + + self.assertEqual(len(p), 1) + payment = p[0] + + self.assertEqual(payment.id, 123456) + self.assertEqual(payment.date, datetime(2015, 1, 1, 5, 1, 2)) + self.assertEqual(payment.usd, 1000) + + def test_enrolled_betas(self): + """ + Tests that enrolled beta programs can be retrieved + """ + enrolled_betas = self.client.account.enrolled_betas() + + self.assertEqual(len(enrolled_betas), 1) + beta = enrolled_betas[0] + + self.assertEqual(beta.id, "cool") + self.assertEqual(beta.enrolled, datetime(2018, 1, 2, 3, 4, 5)) + self.assertEqual(beta.started, datetime(2018, 1, 2, 3, 4, 5)) + self.assertEqual(beta.ended, datetime(2018, 1, 2, 3, 4, 5)) + + def test_join_beta_program(self): + """ + Tests that user can join a beta program + """ + join_beta_url = "/account/betas" + with self.mock_post({}) as m: + self.client.account.join_beta_program("cool_beta") + self.assertEqual( + m.call_data, + { + "id": "cool_beta", + }, + ) + self.assertEqual(m.call_url, join_beta_url) + + # Test that user can join a beta program with an BetaProgram object + with self.mock_post({}) as m: + 
self.client.account.join_beta_program( + BetaProgram(self.client, "cool_beta") + ) + self.assertEqual( + m.call_data, + { + "id": "cool_beta", + }, + ) + self.assertEqual(m.call_url, join_beta_url) + + def test_account_transfer(self): + """ + Tests that payments can be retrieved + """ + transfer = self.client.account.transfer() + + self.assertEqual(transfer.quota, 471) + self.assertEqual(transfer.used, 737373) + self.assertEqual(transfer.billable, 0) + + self.assertEqual(len(transfer.region_transfers), 1) + self.assertEqual(transfer.region_transfers[0].id, "ap-west") + self.assertEqual(transfer.region_transfers[0].used, 1) + self.assertEqual(transfer.region_transfers[0].quota, 5010) + self.assertEqual(transfer.region_transfers[0].billable, 0) + + def test_account_availabilities(self): + """ + Tests that account availabilities can be retrieved + """ + availabilities = self.client.account.availabilities() + + self.assertEqual(len(availabilities), 11) + availability = availabilities[0] + + self.assertEqual(availability.region, "ap-west") + self.assertEqual(availability.unavailable, []) + + +class BetaProgramGroupTest(ClientBaseCase): + """ + Tests methods of the BetaProgramGroup + """ + + def test_betas(self): + """ + Test that available beta programs can be retrieved + """ + betas = self.client.beta.betas() + + self.assertEqual(len(betas), 2) + beta = betas[0] + self.assertEqual(beta.id, "active_closed") + self.assertEqual(beta.label, "active closed beta") + self.assertEqual(beta.started, datetime(2023, 7, 19, 15, 23, 43)) + self.assertEqual(beta.ended, None) + self.assertEqual(beta.greenlight_only, True) + self.assertEqual(beta.more_info, "a link with even more info") + + +class LinodeGroupTest(ClientBaseCase): + """ + Tests methods of the LinodeGroup + """ + + def test_instance_create(self): + """ + Tests that a Linode Instance can be created successfully + """ + with self.mock_post("linode/instances/123") as m: + l = self.client.linode.instance_create( + 
"g6-standard-1", "us-east-1a" + ) + + self.assertIsNotNone(l) + self.assertEqual(l.id, 123) + + self.assertEqual(m.call_url, "/linode/instances") + + self.assertEqual( + m.call_data, {"region": "us-east-1a", "type": "g6-standard-1"} + ) + + def test_instance_create_with_image(self): + """ + Tests that a Linode Instance can be created with an image, and a password generated + """ + with self.mock_post("linode/instances/123") as m: + l, pw = self.client.linode.instance_create( + "g6-standard-1", "us-east-1a", image="linode/debian9" + ) + + self.assertIsNotNone(l) + self.assertEqual(l.id, 123) + + self.assertEqual(m.call_url, "/linode/instances") + + self.assertEqual( + m.call_data, + { + "region": "us-east-1a", + "type": "g6-standard-1", + "image": "linode/debian9", + "root_pass": pw, + }, + ) + + +class LongviewGroupTest(ClientBaseCase): + """ + Tests methods of the LongviewGroup + """ + + def test_get_clients(self): + """ + Tests that a list of LongviewClients can be retrieved + """ + r = self.client.longview.clients() + + self.assertEqual(len(r), 2) + self.assertEqual(r[0].label, "test_client_1") + self.assertEqual(r[0].id, 1234) + self.assertEqual(r[1].label, "longview5678") + self.assertEqual(r[1].id, 5678) + + def test_client_create(self): + """ + Tests that creating a client calls the api correctly + """ + with self.mock_post("longview/clients/5678") as m: + client = self.client.longview.client_create() + + self.assertIsNotNone(client) + self.assertEqual(client.id, 5678) + self.assertEqual(client.label, "longview5678") + + self.assertEqual(m.call_url, "/longview/clients") + self.assertEqual(m.call_data, {}) + + def test_client_create_with_label(self): + """ + Tests that creating a client with a label calls the api correctly + """ + with self.mock_post("longview/clients/1234") as m: + client = self.client.longview.client_create(label="test_client_1") + + self.assertIsNotNone(client) + self.assertEqual(client.id, 1234) + self.assertEqual(client.label, 
"test_client_1") + + self.assertEqual(m.call_url, "/longview/clients") + self.assertEqual(m.call_data, {"label": "test_client_1"}) + + def test_update_plan(self): + """ + Tests that you can submit a correct longview plan update api request + """ + with self.mock_post("/longview/plan") as m: + result = self.client.longview.longview_plan_update("longview-100") + self.assertEqual(m.call_url, "/longview/plan") + self.assertEqual( + m.call_data["longview_subscription"], "longview-100" + ) + self.assertEqual(result.id, "longview-10") + self.assertEqual(result.clients_included, 10) + self.assertEqual(result.label, "Longview Pro 10 pack") + self.assertIsNotNone(result.price) + + def test_get_subscriptions(self): + """ + Tests that Longview subscriptions can be retrieved + """ + + with self.mock_get("longview/subscriptions") as m: + r = self.client.longview.subscriptions() + self.assertEqual(m.call_url, "/longview/subscriptions") + + self.assertEqual(len(r), 4) + + expected_results = ( + ("longview-10", "Longview Pro 10 pack"), + ("longview-100", "Longview Pro 100 pack"), + ("longview-3", "Longview Pro 3 pack"), + ("longview-40", "Longview Pro 40 pack"), + ) + + for result, (expected_id, expected_label) in zip(r, expected_results): + self.assertEqual(result.id, expected_id) + self.assertEqual(result.label, expected_label) + + +class LKEGroupTest(ClientBaseCase): + """ + Tests methods of the LKEGroupTest + """ + + def test_kube_version(self): + """ + Tests that KubeVersions can be retrieved + """ + versions = self.client.lke.versions() + self.assertEqual(len(versions), 3) + self.assertEqual(versions[0].id, "1.19") + self.assertEqual(versions[1].id, "1.18") + self.assertEqual(versions[2].id, "1.17") + + def test_cluster_create_with_api_objects(self): + """ + Tests clusters can be created using api objects + """ + region = self.client.regions().first() + node_type = self.client.linode.types()[0] + version = self.client.lke.versions()[0] + node_pools = 
self.client.lke.node_pool(node_type, 3) + with self.mock_post("lke/clusters") as m: + cluster = self.client.lke.cluster_create( + region, "example-cluster", version, node_pools + ) + self.assertEqual(m.call_data["region"], "ap-west") + self.assertEqual( + m.call_data["node_pools"], [{"type": "g6-nanode-1", "count": 3}] + ) + self.assertEqual(m.call_data["k8s_version"], "1.19") + + self.assertEqual(cluster.id, 18881) + self.assertEqual(cluster.region.id, "ap-west") + self.assertEqual(cluster.k8s_version.id, "1.19") + + def test_lke_types(self): + """ + Tests that a list of LKETypes can be retrieved + """ + types = self.client.lke.types() + self.assertEqual(len(types), 2) + self.assertEqual(types[1].id, "lke-ha") + self.assertEqual(types[1].price.hourly, 0.09) + self.assertEqual(types[1].price.monthly, 60) + self.assertEqual(types[1].region_prices[0].id, "id-cgk") + self.assertEqual(types[1].region_prices[0].hourly, 0.108) + self.assertEqual(types[1].region_prices[0].monthly, 72) + + def test_cluster_create_with_string_repr(self): + """ + Tests clusters can be created using string representations + """ + with self.mock_post("lke/clusters") as m: + cluster = self.client.lke.cluster_create( + "ap-west", + "example-cluster", + "1.19", + {"type": "g6-standard-1", "count": 3}, + ) + self.assertEqual(m.call_data["region"], "ap-west") + self.assertEqual( + m.call_data["node_pools"], + [{"type": "g6-standard-1", "count": 3}], + ) + self.assertEqual(m.call_data["k8s_version"], "1.19") + + self.assertEqual(cluster.id, 18881) + self.assertEqual(cluster.region.id, "ap-west") + self.assertEqual(cluster.k8s_version.id, "1.19") + + +class ProfileGroupTest(ClientBaseCase): + """ + Tests methods of the ProfileGroup + """ + + def test_trusted_devices(self): + devices = self.client.profile.trusted_devices() + self.assertEqual(len(devices), 1) + self.assertEqual(devices[0].id, 123) + + def test_logins(self): + logins = self.client.profile.logins() + self.assertEqual(len(logins), 1) + 
self.assertEqual(logins[0].id, 123) + + def test_phone_number_delete(self): + with self.mock_delete() as m: + self.client.profile.phone_number_delete() + self.assertEqual(m.call_url, "/profile/phone-number") + + def test_phone_number_verify(self): + with self.mock_post({}) as m: + self.client.profile.phone_number_verify("123456") + self.assertEqual(m.call_url, "/profile/phone-number/verify") + self.assertEqual(m.call_data["otp_code"], "123456") + + def test_phone_number_verification_code_send(self): + with self.mock_post({}) as m: + self.client.profile.phone_number_verification_code_send( + "us", "1234567890" + ) + self.assertEqual(m.call_url, "/profile/phone-number") + self.assertEqual(m.call_data["iso_code"], "us") + self.assertEqual(m.call_data["phone_number"], "1234567890") + + def test_user_preferences(self): + with self.mock_get("/profile/preferences") as m: + result = self.client.profile.user_preferences() + self.assertEqual(m.call_url, "/profile/preferences") + self.assertEqual(result.key1, "value1") + self.assertEqual(result.key2, "value2") + + def test_user_preferences_update(self): + with self.mock_put("/profile/preferences") as m: + self.client.profile.user_preferences_update( + key1="value3", key2="value4" + ) + self.assertEqual(m.call_url, "/profile/preferences") + self.assertEqual(m.call_data["key1"], "value3") + self.assertEqual(m.call_data["key2"], "value4") + + def test_security_questions(self): + with self.mock_get("/profile/security-questions") as m: + result = self.client.profile.security_questions() + self.assertEqual(m.call_url, "/profile/security-questions") + self.assertEqual(result.security_questions[0].id, 1) + self.assertEqual( + result.security_questions[0].question, + "In what city were you born?", + ) + self.assertEqual( + result.security_questions[0].response, "Gotham City" + ) + + def test_security_questions_answer(self): + with self.mock_post("/profile/security-questions") as m: + self.client.profile.security_questions_answer( + [ 
+ {"question_id": 1, "response": "secret answer 1"}, + {"question_id": 2, "response": "secret answer 2"}, + {"question_id": 3, "response": "secret answer 3"}, + ] + ) + self.assertEqual(m.call_url, "/profile/security-questions") + + self.assertEqual( + m.call_data["security_questions"][0]["question_id"], 1 + ) + self.assertEqual( + m.call_data["security_questions"][1]["question_id"], 2 + ) + self.assertEqual( + m.call_data["security_questions"][2]["question_id"], 3 + ) + + def test_get_sshkeys(self): + """ + Tests that a list of SSH Keys can be retrieved + """ + r = self.client.profile.ssh_keys() + + self.assertEqual(len(r), 2) + + key1, key2 = r + + self.assertEqual(key1.label, "Home Ubuntu PC") + self.assertEqual( + key1.created, + datetime(year=2018, month=9, day=14, hour=13, minute=0, second=0), + ) + self.assertEqual(key1.id, 22) + self.assertEqual( + key1.ssh_key, + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98" + "ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD" + "eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO" + "TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i" + "muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo" + "F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC" + "MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy" + "nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz" + "/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz" + "tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v" + "TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH" + "d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b" + "cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI" + "CmhW7erNJNVxYjtzseGpBLmRRUTsT038w== dorthu@dorthu-command", + ) + + def test_client_create(self): + """ + Tests that creating a client calls the api correctly + """ + with self.mock_post("longview/clients/5678") as m: + client = self.client.longview.client_create() + + self.assertIsNotNone(client) + self.assertEqual(client.id, 5678) + 
self.assertEqual(client.label, "longview5678") + + self.assertEqual(m.call_url, "/longview/clients") + self.assertEqual(m.call_data, {}) + + def test_ssh_key_create(self): + """ + Tests that creating an ssh key works as expected + """ + with self.mock_post("profile/sshkeys/72") as m: + key = self.client.profile.ssh_key_upload( + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98" + "ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD" + "eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO" + "TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i" + "muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo" + "F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC" + "MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy" + "nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz" + "/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz" + "tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v" + "TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH" + "d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b" + "cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI" + "CmhW7erNJNVxYjtzseGpBLmRRUTsT038w==dorthu@dorthu-command", + "Work Laptop", + ) + + self.assertIsNotNone(key) + self.assertEqual(key.id, 72) + self.assertEqual(key.label, "Work Laptop") + + self.assertEqual(m.call_url, "/profile/sshkeys") + self.assertEqual( + m.call_data, + { + "ssh_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98" + "ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD" + "eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO" + "TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i" + "muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo" + "F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC" + "MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy" + "nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz" + "/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz" + "tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v" + 
"TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH" + "d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b" + "cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI" + "CmhW7erNJNVxYjtzseGpBLmRRUTsT038w==dorthu@dorthu-command", + "label": "Work Laptop", + }, + ) + + +class ObjectStorageGroupTest(ClientBaseCase): + """ + Tests for the ObjectStorageGroup + """ + + def test_get_clusters(self): + """ + Tests that Object Storage Clusters can be retrieved + """ + clusters = self.client.object_storage.clusters() + + self.assertEqual(len(clusters), 1) + cluster = clusters[0] + + self.assertEqual(cluster.id, "us-east-1") + self.assertEqual(cluster.region.id, "us-east") + self.assertEqual(cluster.domain, "us-east-1.linodeobjects.com") + self.assertEqual( + cluster.static_site_domain, "website-us-east-1.linodeobjects.com" + ) + + def test_get_keys(self): + """ + Tests that you can retrieve Object Storage Keys + """ + keys = self.client.object_storage.keys() + + self.assertEqual(len(keys), 2) + key1 = keys[0] + key2 = keys[1] + + self.assertEqual(key1.id, 1) + self.assertEqual(key1.label, "object-storage-key-1") + self.assertEqual(key1.access_key, "testAccessKeyHere123") + self.assertEqual(key1.secret_key, "[REDACTED]") + + self.assertEqual(key2.id, 2) + self.assertEqual(key2.label, "object-storage-key-2") + self.assertEqual(key2.access_key, "testAccessKeyHere456") + self.assertEqual(key2.secret_key, "[REDACTED]") + + def test_object_storage_types(self): + """ + Tests that a list of ObjectStorageTypes can be retrieved + """ + types = self.client.object_storage.types() + self.assertEqual(len(types), 1) + self.assertEqual(types[0].id, "objectstorage") + self.assertEqual(types[0].label, "Object Storage") + self.assertEqual(types[0].price.hourly, 0.0015) + self.assertEqual(types[0].price.monthly, 0.1) + self.assertEqual(types[0].region_prices[0].id, "us-east") + self.assertEqual(types[0].region_prices[0].hourly, 0.00018) + self.assertEqual(types[0].region_prices[0].monthly, 
0.12) + self.assertEqual(types[0].transfer, 0) + + def test_keys_create(self): + """ + Tests that you can create Object Storage Keys + """ + with self.mock_post("object-storage/keys/1") as m: + keys = self.client.object_storage.keys_create( + "object-storage-key-1" + ) + + self.assertIsNotNone(keys) + self.assertEqual(keys.id, 1) + self.assertEqual(keys.label, "object-storage-key-1") + + self.assertEqual(m.call_url, "/object-storage/keys") + self.assertEqual(m.call_data, {"label": "object-storage-key-1"}) + + def test_limited_keys_create(self): + """ + Tests that you can create Object Storage Keys + """ + with self.mock_post("object-storage/keys/2") as m: + keys = self.client.object_storage.keys_create( + "object-storage-key-1", + self.client.object_storage.bucket_access( + "us-east", + "example-bucket", + "read_only", + ), + ["us-east"], + ) + + self.assertIsNotNone(keys) + self.assertEqual(keys.id, 2) + self.assertEqual(keys.label, "object-storage-key-2") + + self.assertEqual(m.call_url, "/object-storage/keys") + self.assertEqual( + m.call_data, + { + "label": "object-storage-key-1", + "bucket_access": [ + { + "permissions": "read_only", + "bucket_name": "example-bucket", + "region": "us-east", + } + ], + "regions": ["us-east"], + }, + ) + + def test_transfer(self): + """ + Test that you can get the amount of outbound data transfer + used by your accountโ€™s Object Storage buckets + """ + object_storage_transfer_url = "/object-storage/transfer" + + with self.mock_get(object_storage_transfer_url) as m: + result = self.client.object_storage.transfer() + self.assertEqual(result.used, 12956600198) + self.assertEqual(m.call_url, object_storage_transfer_url) + + def test_buckets(self): + """ + Test that Object Storage Buckets can be reterived + """ + object_storage_buckets_url = "/object-storage/buckets" + + with self.mock_get(object_storage_buckets_url) as m: + buckets = self.client.object_storage.buckets() + self.assertIsNotNone(buckets) + bucket = buckets[0] + + 
self.assertEqual(m.call_url, object_storage_buckets_url) + self.assertEqual(bucket.cluster, "us-east-1") + self.assertEqual( + bucket.created, + datetime( + year=2019, month=1, day=1, hour=1, minute=23, second=45 + ), + ) + self.assertEqual( + bucket.hostname, "example-bucket.us-east-1.linodeobjects.com" + ) + self.assertEqual(bucket.label, "example-bucket") + self.assertEqual(bucket.objects, 4) + self.assertEqual(bucket.size, 188318981) + + def test_bucket_create(self): + """ + Test that you can create a Object Storage Bucket + """ + # buckets don't work like a normal RESTful collection, so we have to do this + with self.mock_post( + {"label": "example-bucket", "cluster": "us-east-1"} + ) as m: + b = self.client.object_storage.bucket_create( + "us-east-1", "example-bucket", ObjectStorageACL.PRIVATE, True + ) + self.assertIsNotNone(b) + self.assertEqual(m.call_url, "/object-storage/buckets") + self.assertEqual( + m.call_data, + { + "label": "example-bucket", + "cluster": "us-east-1", + "cors_enabled": True, + "acl": "private", + }, + ) + + """ + Test that you can create a Object Storage Bucket passing a Cluster object + """ + with self.mock_post( + {"label": "example-bucket", "cluster": "us-east-1"} + ) as m: + cluster = ObjectStorageCluster(self.client, "us-east-1") + b = self.client.object_storage.bucket_create( + cluster, "example-bucket", "private", True + ) + self.assertIsNotNone(b) + self.assertEqual(m.call_url, "/object-storage/buckets") + self.assertEqual( + m.call_data, + { + "label": "example-bucket", + "cluster": "us-east-1", + "cors_enabled": True, + "acl": "private", + }, + ) + + def test_object_url_create(self): + """ + Test that you can create pre-signed URL to access a single Object in a bucket. 
+ """ + object_url_create_url = ( + "/object-storage/buckets/us-east-1/example-bucket/object-url" + ) + with self.mock_post(object_url_create_url) as m: + result = self.client.object_storage.object_url_create( + "us-east-1", "example-bucket", "GET", "example" + ) + self.assertIsNotNone(result) + self.assertEqual(m.call_url, object_url_create_url) + self.assertEqual( + result.url, + "https://us-east-1.linodeobjects.com/example-bucket/example?Signature=qr98TEucCntPgEG%2BsZQGDsJg93c%3D&Expires=1567609905&AWSAccessKeyId=G4YAF81XWY61DQM94SE0", + ) + self.assertEqual( + m.call_data, + { + "method": "GET", + "name": "example", + "expires_in": 3600, + }, + ) + + +class NetworkingGroupTest(ClientBaseCase): + """ + Tests for the NetworkingGroup + """ + + def test_get_vlans(self): + """ + Tests that Object Storage Clusters can be retrieved + """ + vlans = self.client.networking.vlans() + + self.assertEqual(len(vlans), 1) + self.assertEqual(vlans[0].label, "vlan-test") + self.assertEqual(vlans[0].region.id, "us-southeast") + + self.assertEqual(len(vlans[0].linodes), 2) + self.assertEqual(vlans[0].linodes[0], 111) + self.assertEqual(vlans[0].linodes[1], 222) + + def test_firewall_create(self): + with self.mock_post("networking/firewalls/123") as m: + rules = { + "outbound": [], + "outbound_policy": "DROP", + "inbound": [], + "inbound_policy": "DROP", + } + + f = self.client.networking.firewall_create( + "test-firewall-1", + rules, + devices=FirewallCreateDevicesOptions( + linodes=[123], nodebalancers=[456], linode_interfaces=[789] + ), + status="enabled", + ) + + self.assertIsNotNone(f) + + self.assertEqual(m.call_url, "/networking/firewalls") + self.assertEqual(m.method, "post") + + self.assertEqual(f.id, 123) + self.assertEqual( + m.call_data, + { + "label": "test-firewall-1", + "status": "enabled", + "rules": rules, + "devices": { + "linodes": [123], + "nodebalancers": [456], + "linode_interfaces": [789], + }, + }, + ) + + def test_get_firewalls(self): + """ + Tests that 
firewalls can be retrieved + """ + f = self.client.networking.firewalls() + + self.assertEqual(len(f), 1) + firewall = f[0] + + self.assertEqual(firewall.id, 123) + + def test_get_firewall_settings(self): + """ + Tests that firewall settings can be retrieved + """ + settings = self.client.networking.firewall_settings() + + assert settings.default_firewall_ids.vpc_interface == 123 + assert settings.default_firewall_ids.public_interface == 456 + assert settings.default_firewall_ids.linode == 789 + assert settings.default_firewall_ids.nodebalancer == 321 + + settings.invalidate() + + assert settings.default_firewall_ids.vpc_interface == 123 + assert settings.default_firewall_ids.public_interface == 456 + assert settings.default_firewall_ids.linode == 789 + assert settings.default_firewall_ids.nodebalancer == 321 + + def test_update_firewall_settings(self): + """ + Tests that firewall settings can be updated + """ + settings = self.client.networking.firewall_settings() + + settings.default_firewall_ids.vpc_interface = 321 + settings.default_firewall_ids.public_interface = 654 + settings.default_firewall_ids.linode = 987 + settings.default_firewall_ids.nodebalancer = 123 + + with self.mock_put("networking/firewalls/settings") as m: + settings.save() + + assert m.call_data == { + "default_firewall_ids": { + "vpc_interface": 321, + "public_interface": 654, + "linode": 987, + "nodebalancer": 123, + } + } + + def test_ip_addresses_share(self): + """ + Tests that you can submit a correct ip addresses share api request. 
+ """ + + ip = IPAddress(self.client, "192.0.2.1", {}) + linode = Instance(self.client, 123) + + with self.mock_post({}) as m: + self.client.networking.ip_addresses_share(["192.0.2.1"], 123) + self.assertEqual(m.call_url, "/networking/ips/share") + self.assertEqual(m.call_data["ips"], ["192.0.2.1"]) + self.assertEqual(m.call_data["linode_id"], 123) + + with self.mock_post({}) as m: + self.client.networking.ip_addresses_share([ip], 123) + self.assertEqual(m.call_url, "/networking/ips/share") + self.assertEqual(m.call_data["ips"], ["192.0.2.1"]) + self.assertEqual(m.call_data["linode_id"], 123) + + with self.mock_post({}) as m: + self.client.networking.ip_addresses_share(["192.0.2.1"], linode) + self.assertEqual(m.call_url, "/networking/ips/share") + self.assertEqual(m.call_data["ips"], ["192.0.2.1"]) + self.assertEqual(m.call_data["linode_id"], 123) + + def test_ip_addresses_assign(self): + """ + Tests that you can submit a correct ip addresses assign api request. + """ + + with self.mock_post({}) as m: + self.client.networking.ip_addresses_assign( + {"assignments": [{"address": "192.0.2.1", "linode_id": 123}]}, + "us-east", + ) + self.assertEqual(m.call_url, "/networking/ips/assign") + self.assertEqual( + m.call_data["assignments"], + {"assignments": [{"address": "192.0.2.1", "linode_id": 123}]}, + ) + self.assertEqual(m.call_data["region"], "us-east") + + def test_ipv6_ranges(self): + """ + Tests that IPRanges can be retrieved + """ + ranges = self.client.networking.ipv6_ranges() + self.assertEqual(len(ranges), 1) + self.assertEqual(ranges[0].range, "2600:3c01::") + + def test_network_transfer_prices(self): + """ + Tests that a list of NetworkTransferPrices can be retrieved + """ + transfer_prices = self.client.networking.transfer_prices() + self.assertEqual(len(transfer_prices), 2) + self.assertEqual(transfer_prices[1].id, "network_transfer") + self.assertEqual(transfer_prices[1].price.hourly, 0.005) + self.assertEqual(transfer_prices[1].price.monthly, None) + 
self.assertEqual(len(transfer_prices[1].region_prices), 2) + self.assertEqual(transfer_prices[1].region_prices[0].id, "id-cgk") + self.assertEqual(transfer_prices[1].region_prices[0].hourly, 0.015) + self.assertEqual(transfer_prices[1].region_prices[0].monthly, None) + + +class NodeBalancerGroupTest(ClientBaseCase): + """ + Tests methods of the NodeBalancerGroup + """ + + def test_nodebalancer_types(self): + """ + Tests that a list of NodebalancerTypes can be retrieved + """ + types = self.client.nodebalancers.types() + self.assertEqual(len(types), 1) + self.assertEqual(types[0].id, "nodebalancer") + self.assertEqual(types[0].price.hourly, 0.015) + self.assertEqual(types[0].price.monthly, 10) + self.assertEqual(len(types[0].region_prices), 2) + self.assertEqual(types[0].region_prices[0].id, "id-cgk") + self.assertEqual(types[0].region_prices[0].hourly, 0.018) + self.assertEqual(types[0].region_prices[0].monthly, 12) + + +class VolumeGroupTest(ClientBaseCase): + """ + Tests methods of the VolumeGroup + """ + + def test_volume_types(self): + """ + Tests that a list of VolumeTypes can be retrieved + """ + types = self.client.volumes.types() + self.assertEqual(len(types), 1) + self.assertEqual(types[0].id, "volume") + self.assertEqual(types[0].price.hourly, 0.00015) + self.assertEqual(types[0].price.monthly, 0.1) + self.assertEqual(len(types[0].region_prices), 2) + self.assertEqual(types[0].region_prices[0].id, "id-cgk") + self.assertEqual(types[0].region_prices[0].hourly, 0.00018) + self.assertEqual(types[0].region_prices[0].monthly, 0.12) diff --git a/test/unit/login_client_test.py b/test/unit/login_client_test.py new file mode 100644 index 000000000..5a17d77c1 --- /dev/null +++ b/test/unit/login_client_test.py @@ -0,0 +1,55 @@ +from unittest import TestCase + +from linode_api4 import OAuthScopes + + +class OAuthScopesTest(TestCase): + def test_parse_scopes_none(self): + """ + Tests parsing no scopes + """ + scopes = OAuthScopes.parse("") + self.assertEqual(scopes, 
[]) + + def test_parse_scopes_single(self): + """ + Tests parsing a single scope + """ + scopes = OAuthScopes.parse("linodes:read_only") + self.assertEqual(scopes, [OAuthScopes.Linodes.read_only]) + + def test_parse_scopes_many(self): + """ + Tests parsing many scopes + """ + scopes = OAuthScopes.parse("linodes:read_only domains:read_write") + self.assertEqual( + scopes, + [OAuthScopes.Linodes.read_only, OAuthScopes.Domains.read_write], + ) + + def test_parse_scopes_many_comma_delimited(self): + """ + Tests parsing many scopes that are comma-delimited (which preserves old behavior) + """ + scopes = OAuthScopes.parse( + "nodebalancers:read_write,stackscripts:*,events:read_only" + ) + self.assertEqual( + scopes, + [ + OAuthScopes.NodeBalancers.read_write, + OAuthScopes.StackScripts.all, + OAuthScopes.Events.read_only, + ], + ) + + def test_parse_scopes_all(self): + """ + Tests parsing * scopes + """ + scopes = OAuthScopes.parse("*") + self.assertEqual( + scopes, + [getattr(c, "all") for c in OAuthScopes._scope_families.values()], + ) diff --git a/test/unit/objects/account_test.py b/test/unit/objects/account_test.py new file mode 100644 index 000000000..da807d182 --- /dev/null +++ b/test/unit/objects/account_test.py @@ -0,0 +1,450 @@ +from collections.abc import Iterable +from copy import deepcopy +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4 import AccountSettingsInterfacesForNewLinodes +from linode_api4.objects import ( + Account, + AccountAvailability, + AccountBetaProgram, + AccountSettings, + Database, + Domain, + Event, + Firewall, + Image, + Instance, + Invoice, + Login, + LongviewClient, + NodeBalancer, + OAuthClient, + PaymentMethod, + ServiceTransfer, + StackScript, + User, + UserGrants, + Volume, + get_obj_grants, +) +from linode_api4.objects.account import ChildAccount +from linode_api4.objects.vpc import VPC + + +class InvoiceTest(ClientBaseCase): + """ + Tests methods of the Invoice + """ + + def 
test_get_invoice(self): + """ + Tests that an invoice is loaded correctly by ID + """ + invoice = Invoice(self.client, 123456) + self.assertEqual(invoice._populated, False) + + self.assertEqual(invoice.label, "Invoice #123456") + self.assertEqual(invoice._populated, True) + + self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2)) + self.assertEqual(invoice.total, 9.51) + + def test_get_invoice_items(self): + """ + Tests that you can get items for an invoice + """ + invoice = Invoice(self.client, 123456) + items = invoice.items + + self.assertEqual(len(items), 1) + item = items[0] + + self.assertEqual(item.label, "Linode 2048 - Example") + self.assertEqual(item.type, "hourly") + self.assertEqual(item.amount, 9.51) + self.assertEqual(item.quantity, 317) + self.assertEqual(item.unit_price, "0.03") + self.assertEqual( + item.from_date, + datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2), + ) + self.assertEqual( + item.to_date, + datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59), + ) + + def test_get_account(self): + """ + Tests that an account is loaded correctly by email + """ + account = Account(self.client, "support@linode.com", {}) + + self.assertEqual(account.email, "support@linode.com") + self.assertEqual(account.state, "PA") + self.assertEqual(account.city, "Philadelphia") + self.assertEqual(account.phone, "123-456-7890") + self.assertEqual(account.tax_id, "") + self.assertEqual(account.balance, 0) + self.assertEqual(account.company, "Linode") + self.assertEqual(account.address_1, "3rd & Arch St") + self.assertEqual(account.address_2, "") + self.assertEqual(account.zip, "19106") + self.assertEqual(account.first_name, "Test") + self.assertEqual(account.last_name, "Guy") + self.assertEqual(account.country, "US") + self.assertIsNotNone(account.capabilities) + self.assertIsNotNone(account.active_promotions) + self.assertEqual(account.balance_uninvoiced, 145) + self.assertEqual(account.billing_source, "akamai") + 
self.assertEqual(account.euuid, "E1AF5EEC-526F-487D-B317EBEB34C87D71") + self.assertIn("Linode Interfaces", account.capabilities) + + def test_get_login(self): + """ + Tests that a login is loaded correctly by ID + """ + login = Login(self.client, 123) + + self.assertEqual(login.id, 123) + self.assertEqual(login.ip, "192.0.2.0") + self.assertEqual(login.restricted, True) + self.assertEqual(login.status, "successful") + self.assertEqual(login.username, "test-user") + + def test_get_account_settings(self): + """ + Tests that account settings are loaded correctly + """ + settings = AccountSettings(self.client, False, {}) + + self.assertEqual(settings.longview_subscription.id, "longview-100") + self.assertEqual(settings.managed, False) + self.assertEqual(settings.network_helper, False) + self.assertEqual(settings.object_storage, "active") + self.assertEqual(settings.backups_enabled, True) + self.assertEqual( + settings.interfaces_for_new_linodes, + AccountSettingsInterfacesForNewLinodes.linode_default_but_legacy_config_allowed, + ) + + def test_post_account_settings(self): + """ + Tests that account settings can be updated successfully + """ + settings = self.client.account.settings() + + settings.network_helper = True + settings.backups_enabled = False + settings.interfaces_for_new_linodes = ( + AccountSettingsInterfacesForNewLinodes.linode_only + ) + + with self.mock_put("/account/settings") as m: + settings.save() + + assert m.call_data == { + "network_helper": True, + "backups_enabled": False, + "interfaces_for_new_linodes": AccountSettingsInterfacesForNewLinodes.linode_only, + "maintenance_policy": "linode/migrate", + } + + def test_update_account_settings(self): + """ + Tests that account settings can be updated + """ + with self.mock_put("account/settings") as m: + settings = AccountSettings(self.client, False, {}) + + settings.maintenance_policy = "linode/migrate" + settings.save() + + self.assertEqual(m.call_url, "/account/settings") + self.assertEqual( + 
m.call_data, + { + "maintenance_policy": "linode/migrate", + }, + ) + + def test_get_event(self): + """ + Tests that an event is loaded correctly by ID + """ + event = Event(self.client, 123, {}) + + self.assertEqual(event.action, "ticket_create") + self.assertEqual(event.created, datetime(2025, 3, 25, 12, 0, 0)) + self.assertEqual(event.duration, 300.56) + + self.assertIsNotNone(event.entity) + self.assertEqual(event.entity.id, 11111) + self.assertEqual(event.entity.label, "Problem booting my Linode") + self.assertEqual(event.entity.type, "ticket") + self.assertEqual(event.entity.url, "/v4/support/tickets/11111") + + self.assertEqual(event.id, 123) + self.assertEqual(event.message, "Ticket created for user issue.") + self.assertIsNone(event.percent_complete) + self.assertIsNone(event.rate) + self.assertTrue(event.read) + + self.assertIsNotNone(event.secondary_entity) + self.assertEqual(event.secondary_entity.id, "linode/debian9") + self.assertEqual(event.secondary_entity.label, "linode1234") + self.assertEqual(event.secondary_entity.type, "linode") + self.assertEqual( + event.secondary_entity.url, "/v4/linode/instances/1234" + ) + + self.assertTrue(event.seen) + self.assertEqual(event.status, "completed") + self.assertEqual(event.username, "exampleUser") + + self.assertEqual(event.maintenance_policy_set, "Tentative") + self.assertEqual(event.description, "Scheduled maintenance") + self.assertEqual(event.source, "user") + self.assertEqual(event.not_before, datetime(2025, 3, 25, 12, 0, 0)) + self.assertEqual(event.start_time, datetime(2025, 3, 25, 12, 30, 0)) + self.assertEqual(event.complete_time, datetime(2025, 3, 25, 13, 0, 0)) + + def test_get_invoice(self): + """ + Tests that an invoice is loaded correctly by ID + """ + invoice = Invoice(self.client, 123, {}) + + self.assertEqual(invoice.date, datetime(2018, 1, 1, 0, 1, 1)) + self.assertEqual(invoice.id, 123) + self.assertEqual(invoice.label, "Invoice") + self.assertEqual(invoice.subtotal, 120.25) + 
self.assertEqual(invoice.tax, 12.25) + self.assertEqual(invoice.total, 132.5) + self.assertIsNotNone(invoice.tax_summary) + + def test_get_oauth_client(self): + """ + Tests that an oauth client is loaded correctly by ID + """ + client = OAuthClient(self.client, "2737bf16b39ab5d7b4a1", {}) + + self.assertEqual(client.id, "2737bf16b39ab5d7b4a1") + self.assertEqual(client.label, "Test_Client_1") + self.assertFalse(client.public) + self.assertEqual( + client.redirect_uri, "https://example.org/oauth/callback" + ) + self.assertEqual(client.secret, "") + self.assertEqual(client.status, "active") + self.assertEqual( + client.thumbnail_url, + "https://api.linode.com/v4/account/clients/2737bf16b39ab5d7b4a1/thumbnail", + ) + + def test_get_user(self): + """ + Tests that a user is loaded correctly by username + """ + user = User(self.client, "test-user", {}) + + self.assertEqual(user.username, "test-user") + self.assertEqual(user.email, "test-user@linode.com") + self.assertTrue(user.restricted) + self.assertTrue(user.tfa_enabled) + self.assertIsNotNone(user.ssh_keys) + + def test_get_service_transfer(self): + """ + Tests that a service transfer is loaded correctly by token + """ + serviceTransfer = ServiceTransfer(self.client, "12345") + + self.assertEqual(serviceTransfer.token, "12345") + self.assertTrue(serviceTransfer.is_sender) + self.assertEqual(serviceTransfer.status, "pending") + + def test_get_payment_method(self): + """ + Tests that a payment method is loaded correctly by ID + """ + paymentMethod = PaymentMethod(self.client, 123) + + self.assertEqual(paymentMethod.id, 123) + self.assertTrue(paymentMethod.is_default) + self.assertEqual(paymentMethod.type, "credit_card") + + def test_payment_method_make_default(self): + """ + Tests that making a payment method default creates the correct api request. 
+ """ + paymentMethod = PaymentMethod(self.client, 123) + + with self.mock_post({}) as m: + paymentMethod.payment_method_make_default() + self.assertEqual( + m.call_url, "/account/payment-methods/123/make-default" + ) + + def test_service_transfer_accept(self): + """ + Tests that accepting a service transfer creates the correct api request. + """ + serviceTransfer = ServiceTransfer(self.client, "12345") + + with self.mock_post({}) as m: + serviceTransfer.service_transfer_accept() + self.assertEqual( + m.call_url, "/account/service-transfers/12345/accept" + ) + + +class AccountBetaProgramTest(ClientBaseCase): + """ + Tests methods of the AccountBetaProgram + """ + + def test_account_beta_program_api_get(self): + beta_id = "cool" + account_beta_url = "/account/betas/{}".format(beta_id) + + with self.mock_get(account_beta_url) as m: + beta = AccountBetaProgram(self.client, beta_id) + self.assertEqual(beta.id, beta_id) + self.assertEqual(beta.enrolled, datetime(2018, 1, 2, 3, 4, 5)) + self.assertEqual(beta.started, datetime(2018, 1, 2, 3, 4, 5)) + self.assertEqual(beta.ended, datetime(2018, 1, 2, 3, 4, 5)) + + self.assertEqual(m.call_url, account_beta_url) + + +class AccountAvailabilityTest(ClientBaseCase): + """ + Test methods of the AccountAvailability + """ + + def test_account_availability_api_list(self): + with self.mock_get("/account/availability") as m: + availabilities = self.client.account.availabilities() + + for avail in availabilities: + assert avail.region is not None + assert len(avail.unavailable) == 0 + assert len(avail.available) > 0 + + self.assertEqual(m.call_url, "/account/availability") + + def test_account_availability_api_get(self): + region_id = "us-east" + account_availability_url = "/account/availability/{}".format(region_id) + + with self.mock_get(account_availability_url) as m: + availability = AccountAvailability(self.client, region_id) + self.assertEqual(availability.region, region_id) + self.assertEqual(availability.unavailable, []) + 
self.assertEqual(availability.available, ["Linodes", "Kubernetes"]) + + self.assertEqual(m.call_url, account_availability_url) + + +class ChildAccountTest(ClientBaseCase): + """ + Test methods of the ChildAccount + """ + + def test_child_account_api_list(self): + result = self.client.account.child_accounts() + self.assertEqual(len(result), 1) + self.assertEqual(result[0].euuid, "E1AF5EEC-526F-487D-B317EBEB34C87D71") + + def test_child_account_create_token(self): + child_account = self.client.load(ChildAccount, 123456) + with self.mock_post("/account/child-accounts/123456/token") as m: + token = child_account.create_token() + self.assertEqual(token.token, "abcdefghijklmnop") + self.assertEqual(m.call_data, {}) + + +def test_get_user_grant(): + """ + Tests that a user grant is loaded correctly + """ + grants = get_obj_grants() + + assert grants.count(("linode", Instance)) > 0 + assert grants.count(("domain", Domain)) > 0 + assert grants.count(("stackscript", StackScript)) > 0 + assert grants.count(("nodebalancer", NodeBalancer)) > 0 + assert grants.count(("volume", Volume)) > 0 + assert grants.count(("image", Image)) > 0 + assert grants.count(("longview", LongviewClient)) > 0 + assert grants.count(("database", Database)) > 0 + assert grants.count(("firewall", Firewall)) > 0 + assert grants.count(("vpc", VPC)) > 0 + + +def test_user_grants_serialization(): + """ + Tests that user grants from JSON is serialized correctly + """ + user_grants_json = { + "database": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "domain": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "firewall": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "global": { + "account_access": "read_only", + "add_databases": True, + "add_domains": True, + "add_firewalls": True, + "add_images": True, + "add_linodes": True, + "add_longview": True, + "add_nodebalancers": True, + "add_stackscripts": True, + 
"add_volumes": True, + "add_vpcs": True, + "cancel_account": False, + "child_account_access": True, + "longview_subscription": True, + }, + "image": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "linode": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "longview": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "nodebalancer": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "stackscript": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "volume": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + "vpc": [ + {"id": 123, "label": "example-entity", "permissions": "read_only"} + ], + } + + expected_serialized_grants = deepcopy(user_grants_json) + + for grants in expected_serialized_grants.values(): + if isinstance(grants, Iterable): + for grant in grants: + if isinstance(grant, dict) and "label" in grant: + del grant["label"] + + assert ( + UserGrants(None, None, user_grants_json)._serialize() + == expected_serialized_grants + ) diff --git a/test/unit/objects/base_test.py b/test/unit/objects/base_test.py new file mode 100644 index 000000000..d60a3bd38 --- /dev/null +++ b/test/unit/objects/base_test.py @@ -0,0 +1,286 @@ +from dataclasses import dataclass +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Base, JSONObject, MappedObject, Property +from linode_api4.objects.base import ( + ExplicitNullValue, + _flatten_request_body_recursive, +) + + +class FlattenRequestBodyRecursiveCase(ClientBaseCase): + """Test cases for _flatten_request_body_recursive function""" + + def test_flatten_primitive_types(self): + """Test that primitive types are returned as-is""" + self.assertEqual(_flatten_request_body_recursive(123), 123) + self.assertEqual(_flatten_request_body_recursive("test"), "test") + self.assertEqual(_flatten_request_body_recursive(3.14), 3.14) + 
self.assertEqual(_flatten_request_body_recursive(True), True) + self.assertEqual(_flatten_request_body_recursive(False), False) + self.assertEqual(_flatten_request_body_recursive(None), None) + + def test_flatten_dict(self): + """Test that dicts are recursively flattened""" + test_dict = {"key1": "value1", "key2": 123, "key3": True} + result = _flatten_request_body_recursive(test_dict) + self.assertEqual(result, test_dict) + + def test_flatten_nested_dict(self): + """Test that nested dicts are recursively flattened""" + test_dict = { + "level1": { + "level2": {"level3": "value", "number": 42}, + "string": "test", + }, + "array": [1, 2, 3], + } + result = _flatten_request_body_recursive(test_dict) + self.assertEqual(result, test_dict) + + def test_flatten_list(self): + """Test that lists are recursively flattened""" + test_list = [1, "two", 3.0, True] + result = _flatten_request_body_recursive(test_list) + self.assertEqual(result, test_list) + + def test_flatten_nested_list(self): + """Test that nested lists are recursively flattened""" + test_list = [[1, 2], [3, [4, 5]], "string"] + result = _flatten_request_body_recursive(test_list) + self.assertEqual(result, test_list) + + def test_flatten_base_object(self): + """Test that Base objects are flattened to their ID""" + + class TestBase(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + } + + obj = TestBase(self.client, 123) + result = _flatten_request_body_recursive(obj) + self.assertEqual(result, 123) + + def test_flatten_base_object_in_dict(self): + """Test that Base objects in dicts are flattened to their ID""" + + class TestBase(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + } + + obj = TestBase(self.client, 456) + test_dict = {"resource": obj, "name": "test"} + result = _flatten_request_body_recursive(test_dict) + self.assertEqual(result, {"resource": 456, 
"name": "test"}) + + def test_flatten_base_object_in_list(self): + """Test that Base objects in lists are flattened to their ID""" + + class TestBase(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "label": Property(mutable=True), + } + + obj1 = TestBase(self.client, 111) + obj2 = TestBase(self.client, 222) + test_list = [obj1, "middle", obj2] + result = _flatten_request_body_recursive(test_list) + self.assertEqual(result, [111, "middle", 222]) + + def test_flatten_explicit_null_instance(self): + """Test that ExplicitNullValue instances are converted to None""" + result = _flatten_request_body_recursive(ExplicitNullValue()) + self.assertIsNone(result) + + def test_flatten_explicit_null_class(self): + """Test that ExplicitNullValue class is converted to None""" + result = _flatten_request_body_recursive(ExplicitNullValue) + self.assertIsNone(result) + + def test_flatten_explicit_null_in_dict(self): + """Test that ExplicitNullValue in dicts is converted to None""" + test_dict = { + "field1": "value", + "field2": ExplicitNullValue(), + "field3": ExplicitNullValue, + } + result = _flatten_request_body_recursive(test_dict) + self.assertEqual( + result, {"field1": "value", "field2": None, "field3": None} + ) + + def test_flatten_explicit_null_in_list(self): + """Test that ExplicitNullValue in lists is converted to None""" + test_list = ["value", ExplicitNullValue(), ExplicitNullValue, 123] + result = _flatten_request_body_recursive(test_list) + self.assertEqual(result, ["value", None, None, 123]) + + def test_flatten_mapped_object(self): + """Test that MappedObject is serialized""" + mapped_obj = MappedObject(key1="value1", key2=123) + result = _flatten_request_body_recursive(mapped_obj) + self.assertEqual(result, {"key1": "value1", "key2": 123}) + + def test_flatten_mapped_object_nested(self): + """Test that nested MappedObject is serialized""" + mapped_obj = MappedObject( + outer="value", inner={"nested_key": "nested_value"} + 
) + result = _flatten_request_body_recursive(mapped_obj) + # The inner dict becomes a MappedObject when created + self.assertIn("outer", result) + self.assertEqual(result["outer"], "value") + self.assertIn("inner", result) + + def test_flatten_mapped_object_in_dict(self): + """Test that MappedObject in dicts is serialized""" + mapped_obj = MappedObject(key="value") + test_dict = {"field": mapped_obj, "other": "data"} + result = _flatten_request_body_recursive(test_dict) + self.assertEqual(result, {"field": {"key": "value"}, "other": "data"}) + + def test_flatten_mapped_object_in_list(self): + """Test that MappedObject in lists is serialized""" + mapped_obj = MappedObject(key="value") + test_list = [mapped_obj, "string", 123] + result = _flatten_request_body_recursive(test_list) + self.assertEqual(result, [{"key": "value"}, "string", 123]) + + def test_flatten_json_object(self): + """Test that JSONObject subclasses are serialized""" + + @dataclass + class TestJSONObject(JSONObject): + field1: str = "" + field2: int = 0 + + json_obj = TestJSONObject.from_json({"field1": "test", "field2": 42}) + result = _flatten_request_body_recursive(json_obj) + self.assertEqual(result, {"field1": "test", "field2": 42}) + + def test_flatten_json_object_in_dict(self): + """Test that JSONObject in dicts is serialized""" + + @dataclass + class TestJSONObject(JSONObject): + name: str = "" + + json_obj = TestJSONObject.from_json({"name": "test"}) + test_dict = {"obj": json_obj, "value": 123} + result = _flatten_request_body_recursive(test_dict) + self.assertEqual(result, {"obj": {"name": "test"}, "value": 123}) + + def test_flatten_json_object_in_list(self): + """Test that JSONObject in lists is serialized""" + + @dataclass + class TestJSONObject(JSONObject): + id: int = 0 + + json_obj = TestJSONObject.from_json({"id": 999}) + test_list = [json_obj, "text"] + result = _flatten_request_body_recursive(test_list) + self.assertEqual(result, [{"id": 999}, "text"]) + + def 
test_flatten_complex_nested_structure(self): + """Test a complex nested structure with multiple types""" + + class TestBase(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + } + + @dataclass + class TestJSONObject(JSONObject): + value: str = "" + + base_obj = TestBase(self.client, 555) + mapped_obj = MappedObject(key="mapped") + json_obj = TestJSONObject.from_json({"value": "json"}) + + complex_structure = { + "base": base_obj, + "mapped": mapped_obj, + "json": json_obj, + "null": ExplicitNullValue(), + "list": [base_obj, mapped_obj, json_obj, ExplicitNullValue], + "nested": { + "deep": { + "base": base_obj, + "primitives": [1, "two", 3.0], + } + }, + } + + result = _flatten_request_body_recursive(complex_structure) + + self.assertEqual(result["base"], 555) + self.assertEqual(result["mapped"], {"key": "mapped"}) + self.assertEqual(result["json"], {"value": "json"}) + self.assertIsNone(result["null"]) + self.assertEqual( + result["list"], [555, {"key": "mapped"}, {"value": "json"}, None] + ) + self.assertEqual(result["nested"]["deep"]["base"], 555) + self.assertEqual( + result["nested"]["deep"]["primitives"], [1, "two", 3.0] + ) + + def test_flatten_with_is_put_false(self): + """Test that is_put parameter is passed through""" + + @dataclass + class TestJSONObject(JSONObject): + field: str = "" + + def _serialize(self, is_put=False): + return {"field": self.field, "is_put": is_put} + + json_obj = TestJSONObject.from_json({"field": "test"}) + result = _flatten_request_body_recursive(json_obj, is_put=False) + self.assertEqual(result, {"field": "test", "is_put": False}) + + def test_flatten_with_is_put_true(self): + """Test that is_put=True parameter is passed through""" + + @dataclass + class TestJSONObject(JSONObject): + field: str = "" + + def _serialize(self, is_put=False): + return {"field": self.field, "is_put": is_put} + + json_obj = TestJSONObject.from_json({"field": "test"}) + result = 
_flatten_request_body_recursive(json_obj, is_put=True) + self.assertEqual(result, {"field": "test", "is_put": True}) + + def test_flatten_empty_dict(self): + """Test that empty dicts are handled correctly""" + result = _flatten_request_body_recursive({}) + self.assertEqual(result, {}) + + def test_flatten_empty_list(self): + """Test that empty lists are handled correctly""" + result = _flatten_request_body_recursive([]) + self.assertEqual(result, []) + + def test_flatten_dict_with_none_values(self): + """Test that None values in dicts are preserved""" + test_dict = {"key1": "value", "key2": None, "key3": 0} + result = _flatten_request_body_recursive(test_dict) + self.assertEqual(result, test_dict) + + def test_flatten_list_with_none_values(self): + """Test that None values in lists are preserved""" + test_list = ["value", None, 0, ""] + result = _flatten_request_body_recursive(test_list) + self.assertEqual(result, test_list) diff --git a/test/unit/objects/beta_test.py b/test/unit/objects/beta_test.py new file mode 100644 index 000000000..98c6437c1 --- /dev/null +++ b/test/unit/objects/beta_test.py @@ -0,0 +1,30 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4.objects import BetaProgram + + +class BetaProgramTest(ClientBaseCase): + """ + Test the methods of the Beta Program. 
+ """ + + def test_beta_program_api_get(self): + beta_id = "active" + beta_program_api_get_url = "/betas/{}".format(beta_id) + + with self.mock_get(beta_program_api_get_url) as m: + beta_program = BetaProgram(self.client, beta_id) + self.assertEqual(beta_program.id, beta_id) + self.assertEqual(beta_program.label, "active closed beta") + self.assertEqual(beta_program.description, "An active closed beta") + self.assertEqual( + beta_program.started, datetime(2018, 1, 2, 3, 4, 5) + ) + self.assertEqual(beta_program.ended, None) + self.assertEqual(beta_program.greenlight_only, True) + self.assertEqual( + beta_program.more_info, "a link with even more info" + ) + + self.assertEqual(m.call_url, beta_program_api_get_url) diff --git a/test/unit/objects/database_test.py b/test/unit/objects/database_test.py new file mode 100644 index 000000000..3d0eb4dad --- /dev/null +++ b/test/unit/objects/database_test.py @@ -0,0 +1,407 @@ +import logging +from test.unit.base import ClientBaseCase + +from linode_api4 import ( + DatabasePrivateNetwork, + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) +from linode_api4.objects import MySQLDatabase + +logger = logging.getLogger(__name__) + + +class MySQLDatabaseTest(ClientBaseCase): + """ + Tests methods of the MySQLDatabase class + """ + + def test_create(self): + """ + Test that MySQL databases can be created + """ + + logger = logging.getLogger(__name__) + + with self.mock_post("/databases/mysql/instances") as m: + # We don't care about errors here; we just want to + # validate the request. 
+ try: + self.client.database.mysql_create( + "cool", + "us-southeast", + "mysql/8.0.26", + "g6-standard-1", + cluster_size=3, + engine_config=MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions( + connect_timeout=20 + ), + binlog_retention_period=200, + ), + private_network=DatabasePrivateNetwork( + vpc_id=1234, + subnet_id=5678, + public_access=True, + ), + ) + except Exception as e: + logger.warning( + "An error occurred while validating the request: %s", e + ) + + self.assertEqual(m.method, "post") + self.assertEqual(m.call_url, "/databases/mysql/instances") + self.assertEqual(m.call_data["label"], "cool") + self.assertEqual(m.call_data["region"], "us-southeast") + self.assertEqual(m.call_data["engine"], "mysql/8.0.26") + self.assertEqual(m.call_data["type"], "g6-standard-1") + self.assertEqual(m.call_data["cluster_size"], 3) + self.assertEqual( + m.call_data["engine_config"]["mysql"]["connect_timeout"], 20 + ) + self.assertEqual( + m.call_data["engine_config"]["binlog_retention_period"], 200 + ) + + self.assertEqual(m.call_data["private_network"]["vpc_id"], 1234) + self.assertEqual(m.call_data["private_network"]["subnet_id"], 5678) + self.assertEqual( + m.call_data["private_network"]["public_access"], True + ) + + def test_update(self): + """ + Test that the MySQL database can be updated + """ + + with self.mock_put("/databases/mysql/instances/123") as m: + new_allow_list = ["192.168.0.1/32"] + + db = MySQLDatabase(self.client, 123) + + db.updates.day_of_week = 2 + db.allow_list = new_allow_list + db.label = "cool" + db.engine_config = MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions(connect_timeout=20), + binlog_retention_period=200, + ) + db.private_network = DatabasePrivateNetwork( + vpc_id=1234, + subnet_id=5678, + public_access=True, + ) + + db.save() + + self.assertEqual(m.method, "put") + self.assertEqual(m.call_url, "/databases/mysql/instances/123") + self.assertEqual(m.call_data["label"], "cool") + 
self.assertEqual(m.call_data["updates"]["day_of_week"], 2) + self.assertEqual(m.call_data["allow_list"], new_allow_list) + self.assertEqual( + m.call_data["engine_config"]["mysql"]["connect_timeout"], 20 + ) + self.assertEqual( + m.call_data["engine_config"]["binlog_retention_period"], 200 + ) + + self.assertEqual(m.call_data["private_network"]["vpc_id"], 1234) + self.assertEqual(m.call_data["private_network"]["subnet_id"], 5678) + self.assertEqual( + m.call_data["private_network"]["public_access"], True + ) + + def test_patch(self): + """ + Test MySQL Database patching logic. + """ + with self.mock_post("/databases/mysql/instances/123/patch") as m: + db = MySQLDatabase(self.client, 123) + + db.patch() + + self.assertEqual(m.method, "post") + self.assertEqual(m.call_url, "/databases/mysql/instances/123/patch") + + def test_get_ssl(self): + """ + Test MySQL SSL cert logic + """ + db = MySQLDatabase(self.client, 123) + + ssl = db.ssl + + self.assertEqual(ssl.ca_certificate, "LS0tLS1CRUdJ...==") + + def test_get_credentials(self): + """ + Test MySQL credentials logic + """ + db = MySQLDatabase(self.client, 123) + + creds = db.credentials + + self.assertEqual(creds.password, "s3cur3P@ssw0rd") + self.assertEqual(creds.username, "linroot") + + def test_reset_credentials(self): + """ + Test resetting MySQL credentials + """ + with self.mock_post( + "/databases/mysql/instances/123/credentials/reset" + ) as m: + db = MySQLDatabase(self.client, 123) + + db.credentials_reset() + + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_url, "/databases/mysql/instances/123/credentials/reset" + ) + + def test_suspend(self): + """ + Test MySQL Database suspend logic. 
+ """ + with self.mock_post("/databases/mysql/instances/123/suspend") as m: + db = MySQLDatabase(self.client, 123) + + db.suspend() + + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_url, "/databases/mysql/instances/123/suspend" + ) + + def test_resume(self): + """ + Test MySQL Database resume logic. + """ + with self.mock_post("/databases/mysql/instances/123/resume") as m: + db = MySQLDatabase(self.client, 123) + + db.resume() + + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_url, "/databases/mysql/instances/123/resume" + ) + + +class PostgreSQLDatabaseTest(ClientBaseCase): + """ + Tests methods of the PostgreSQLDatabase class + """ + + def test_create(self): + """ + Test that PostgreSQL databases can be created + """ + + with self.mock_post("/databases/postgresql/instances") as m: + # We don't care about errors here; we just want to + # validate the request. + try: + self.client.database.postgresql_create( + "cool", + "us-southeast", + "postgresql/13.2", + "g6-standard-1", + cluster_size=3, + engine_config=PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.5, + pg_partman_bgw_interval=3600, + pg_partman_bgw_role="myrolename", + pg_stat_monitor_pgsm_enable_query_plan=False, + pg_stat_monitor_pgsm_max_buckets=10, + pg_stat_statements_track="top", + ), + work_mem=4, + ), + private_network=DatabasePrivateNetwork( + vpc_id=1234, + subnet_id=5678, + public_access=True, + ), + ) + except Exception: + pass + + self.assertEqual(m.method, "post") + self.assertEqual(m.call_url, "/databases/postgresql/instances") + self.assertEqual(m.call_data["label"], "cool") + self.assertEqual(m.call_data["region"], "us-southeast") + self.assertEqual(m.call_data["engine"], "postgresql/13.2") + self.assertEqual(m.call_data["type"], "g6-standard-1") + self.assertEqual(m.call_data["cluster_size"], 3) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "autovacuum_analyze_scale_factor" + ], + 
0.5, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_partman_bgw.interval"], + 3600, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_partman_bgw.role"], + "myrolename", + ) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "pg_stat_monitor.pgsm_enable_query_plan" + ], + False, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "pg_stat_monitor.pgsm_max_buckets" + ], + 10, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_stat_statements.track"], + "top", + ) + self.assertEqual(m.call_data["engine_config"]["work_mem"], 4) + + self.assertEqual(m.call_data["private_network"]["vpc_id"], 1234) + self.assertEqual(m.call_data["private_network"]["subnet_id"], 5678) + self.assertEqual( + m.call_data["private_network"]["public_access"], True + ) + + def test_update(self): + """ + Test that the PostgreSQL database can be updated + """ + + with self.mock_put("/databases/postgresql/instances/123") as m: + new_allow_list = ["192.168.0.1/32"] + + db = PostgreSQLDatabase(self.client, 123) + + db.updates.day_of_week = 2 + db.allow_list = new_allow_list + db.label = "cool" + db.engine_config = PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.5 + ), + work_mem=4, + ) + + db.private_network = DatabasePrivateNetwork( + vpc_id=1234, + subnet_id=5678, + public_access=True, + ) + + db.save() + + self.assertEqual(m.method, "put") + self.assertEqual(m.call_url, "/databases/postgresql/instances/123") + self.assertEqual(m.call_data["label"], "cool") + self.assertEqual(m.call_data["updates"]["day_of_week"], 2) + self.assertEqual(m.call_data["allow_list"], new_allow_list) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "autovacuum_analyze_scale_factor" + ], + 0.5, + ) + self.assertEqual(m.call_data["engine_config"]["work_mem"], 4) + + self.assertEqual(m.call_data["private_network"]["vpc_id"], 1234) + 
self.assertEqual(m.call_data["private_network"]["subnet_id"], 5678) + self.assertEqual( + m.call_data["private_network"]["public_access"], True + ) + + def test_patch(self): + """ + Test PostgreSQL Database patching logic. + """ + with self.mock_post("/databases/postgresql/instances/123/patch") as m: + db = PostgreSQLDatabase(self.client, 123) + + db.patch() + + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_url, "/databases/postgresql/instances/123/patch" + ) + + def test_get_ssl(self): + """ + Test PostgreSQL SSL cert logic + """ + db = PostgreSQLDatabase(self.client, 123) + + ssl = db.ssl + + self.assertEqual(ssl.ca_certificate, "LS0tLS1CRUdJ...==") + + def test_get_credentials(self): + """ + Test PostgreSQL credentials logic + """ + db = PostgreSQLDatabase(self.client, 123) + + creds = db.credentials + + self.assertEqual(creds.password, "s3cur3P@ssw0rd") + self.assertEqual(creds.username, "linroot") + + def test_reset_credentials(self): + """ + Test resetting PostgreSQL credentials + """ + with self.mock_post( + "/databases/postgresql/instances/123/credentials/reset" + ) as m: + db = PostgreSQLDatabase(self.client, 123) + + db.credentials_reset() + + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_url, + "/databases/postgresql/instances/123/credentials/reset", + ) + + def test_suspend(self): + """ + Test PostgreSQL Database suspend logic. + """ + with self.mock_post("/databases/postgresql/instances/123/suspend") as m: + db = PostgreSQLDatabase(self.client, 123) + + db.suspend() + + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_url, "/databases/postgresql/instances/123/suspend" + ) + + def test_resume(self): + """ + Test PostgreSQL Database resume logic. 
+ """ + with self.mock_post("/databases/postgresql/instances/123/resume") as m: + db = PostgreSQLDatabase(self.client, 123) + + db.resume() + + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_url, "/databases/postgresql/instances/123/resume" + ) diff --git a/test/unit/objects/domain_test.py b/test/unit/objects/domain_test.py new file mode 100644 index 000000000..f67503c9c --- /dev/null +++ b/test/unit/objects/domain_test.py @@ -0,0 +1,59 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Domain, DomainRecord + + +class DomainGeneralTest(ClientBaseCase): + """ + Tests methods of the Domain class. + """ + + def test_domain_get(self): + domain_record = DomainRecord(self.client, 123456, 12345) + + self.assertEqual(domain_record.id, 123456) + + def test_save_null_values_excluded(self): + with self.mock_put("domains/12345") as m: + domain = self.client.load(Domain, 12345) + + domain.type = "slave" + domain.master_ips = ["127.0.0.1"] + domain.save() + self.assertTrue("group" not in m.call_data.keys()) + + def test_zone_file_view(self): + domain = Domain(self.client, 12345) + + with self.mock_get("/domains/12345/zone-file") as m: + result = domain.zone_file_view() + self.assertEqual(m.call_url, "/domains/12345/zone-file") + self.assertIsNotNone(result) + + def test_clone(self): + domain = Domain(self.client, 12345) + + with self.mock_post("/domains/12345/clone") as m: + clone = domain.clone("example.org") + self.assertEqual(m.call_url, "/domains/12345/clone") + self.assertEqual(m.call_data["domain"], "example.org") + self.assertEqual(clone.id, 12345) + + def test_import(self): + domain = Domain(self.client, 12345) + + with self.mock_post("/domains/import") as m: + domain.domain_import("example.org", "examplenameserver.com") + self.assertEqual(m.call_url, "/domains/import") + self.assertEqual(m.call_data["domain"], "example.org") + self.assertEqual( + m.call_data["remote_nameserver"], "examplenameserver.com" + ) + + with 
self.mock_post("/domains/import") as m: + domain.domain_import(domain, "examplenameserver.com") + self.assertEqual(m.call_url, "/domains/import") + self.assertEqual(m.call_data["domain"], "example.org") + self.assertEqual( + m.call_data["remote_nameserver"], "examplenameserver.com" + ) diff --git a/test/unit/objects/firewall_test.py b/test/unit/objects/firewall_test.py new file mode 100644 index 000000000..f4c6efb66 --- /dev/null +++ b/test/unit/objects/firewall_test.py @@ -0,0 +1,160 @@ +from test.unit.base import ClientBaseCase + +from linode_api4 import FirewallTemplate, MappedObject +from linode_api4.objects import Firewall, FirewallDevice + + +class FirewallTest(ClientBaseCase): + """ + Tests methods of the Firewall class + """ + + def test_get_rules(self): + """ + Test that the rules can be retrieved from a Firewall + """ + firewall = Firewall(self.client, 123) + rules = firewall.rules + + self.assertEqual(len(rules.inbound), 0) + self.assertEqual(rules.inbound_policy, "DROP") + self.assertEqual(len(rules.outbound), 0) + self.assertEqual(rules.outbound_policy, "DROP") + + def test_update_rules(self): + """ + Test that the rules can be updated for a Firewall + """ + + firewall = Firewall(self.client, 123) + + with self.mock_put("networking/firewalls/123/rules") as m: + new_rules = { + "inbound": [ + { + "action": "ACCEPT", + "addresses": { + "ipv4": ["0.0.0.0/0"], + "ipv6": ["ff00::/8"], + }, + "description": "A really cool firewall rule.", + "label": "really-cool-firewall-rule", + "ports": "80", + "protocol": "TCP", + } + ], + "inbound_policy": "ALLOW", + "outbound": [], + "outbound_policy": "ALLOW", + } + + firewall.update_rules(new_rules) + + self.assertEqual(m.method, "put") + self.assertEqual(m.call_url, "/networking/firewalls/123/rules") + + self.assertEqual(m.call_data, new_rules) + + def test_create_device(self): + """ + Tests that firewall devices can be created successfully + """ + + firewall = Firewall(self.client, 123) + + with 
self.mock_post("networking/firewalls/123/devices/123") as m: + firewall.device_create(123, "linode") + assert m.call_data == {"id": 123, "type": "linode"} + + with self.mock_post("networking/firewalls/123/devices/456") as m: + firewall.device_create(123, "interface") + assert m.call_data == {"id": 123, "type": "interface"} + + +class FirewallDevicesTest(ClientBaseCase): + """ + Tests methods of Firewall devices + """ + + def test_get_devices(self): + """ + Tests that devices can be pulled from a firewall + """ + firewall = Firewall(self.client, 123) + assert len(firewall.devices) == 2 + + assert firewall.devices[0].created is not None + assert firewall.devices[0].id == 123 + assert firewall.devices[0].updated is not None + + assert firewall.devices[0].entity.id == 123 + assert firewall.devices[0].entity.label == "my-linode" + assert firewall.devices[0].entity.type == "linode" + assert firewall.devices[0].entity.url == "/v4/linode/instances/123" + + assert firewall.devices[1].created is not None + assert firewall.devices[1].id == 456 + assert firewall.devices[1].updated is not None + + assert firewall.devices[1].entity.id == 123 + assert firewall.devices[1].entity.label is None + assert firewall.devices[1].entity.type == "interface" + assert ( + firewall.devices[1].entity.url + == "/v4/linode/instances/123/interfaces/123" + ) + + def test_get_device(self): + """ + Tests that a device is loaded correctly by ID + """ + device = FirewallDevice(self.client, 123, 123) + self.assertEqual(device._populated, False) + + self.assertEqual(device.id, 123) + self.assertEqual(device.entity.id, 123) + self.assertEqual(device.entity.label, "my-linode") + self.assertEqual(device.entity.type, "linode") + self.assertEqual(device.entity.url, "/v4/linode/instances/123") + + self.assertEqual(device._populated, True) + + +class FirewallTemplatesTest(ClientBaseCase): + @staticmethod + def assert_rules(rules: MappedObject): + assert rules.outbound_policy == "DROP" + assert 
len(rules.outbound) == 1 + + assert rules.inbound_policy == "DROP" + assert len(rules.inbound) == 1 + + outbound_rule = rules.outbound[0] + assert outbound_rule.action == "ACCEPT" + assert outbound_rule.addresses.ipv4[0] == "192.0.2.0/24" + assert outbound_rule.addresses.ipv4[1] == "198.51.100.2/32" + assert outbound_rule.addresses.ipv6[0] == "2001:DB8::/128" + assert outbound_rule.description == "test" + assert outbound_rule.label == "test-rule" + assert outbound_rule.ports == "22-24, 80, 443" + assert outbound_rule.protocol == "TCP" + + inbound_rule = rules.inbound[0] + assert inbound_rule.action == "ACCEPT" + assert inbound_rule.addresses.ipv4[0] == "192.0.2.0/24" + assert inbound_rule.addresses.ipv4[1] == "198.51.100.2/32" + assert inbound_rule.addresses.ipv6[0] == "2001:DB8::/128" + assert inbound_rule.description == "test" + assert inbound_rule.label == "test-rule" + assert inbound_rule.ports == "22-24, 80, 443" + assert inbound_rule.protocol == "TCP" + + def test_get_public(self): + template = self.client.load(FirewallTemplate, "public") + assert template.slug == "public" + self.assert_rules(template.rules) + + def test_get_vpc(self): + template = self.client.load(FirewallTemplate, "vpc") + assert template.slug == "vpc" + self.assert_rules(template.rules) diff --git a/test/unit/objects/image_share_group_test.py b/test/unit/objects/image_share_group_test.py new file mode 100644 index 000000000..e02f0672c --- /dev/null +++ b/test/unit/objects/image_share_group_test.py @@ -0,0 +1,295 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects import ( + ImageShareGroup, + ImageShareGroupImagesToAdd, + ImageShareGroupImageToAdd, + ImageShareGroupImageToUpdate, + ImageShareGroupMemberToAdd, + ImageShareGroupMemberToUpdate, + ImageShareGroupToken, +) + + +class ImageShareGroupTest(ClientBaseCase): + """ + Tests the methods of ImageShareGroup class + """ + + def test_get_sharegroup(self): + """ + Tests that an Image Share Group is loaded correctly by 
ID + """ + sharegroup = ImageShareGroup(self.client, 1234) + + self.assertEqual(sharegroup.id, 1234) + self.assertEqual( + sharegroup.description, "My group of images to share with my team." + ) + self.assertEqual(sharegroup.images_count, 0) + self.assertEqual(sharegroup.is_suspended, False) + self.assertEqual(sharegroup.label, "My Shared Images") + self.assertEqual(sharegroup.members_count, 0) + self.assertEqual( + sharegroup.uuid, "1533863e-16a4-47b5-b829-ac0f35c13278" + ) + + def test_update_sharegroup(self): + """ + Tests that an Image Share Group can be updated + """ + with self.mock_put("/images/sharegroups/1234") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + sharegroup.label = "Updated Sharegroup Label" + sharegroup.description = "Updated description for my sharegroup." + sharegroup.save() + self.assertEqual(m.call_url, "/images/sharegroups/1234") + self.assertEqual( + m.call_data, + { + "label": "Updated Sharegroup Label", + "description": "Updated description for my sharegroup.", + }, + ) + + def test_delete_sharegroup(self): + """ + Tests that deleting an Image Share Group creates the correct api request + """ + with self.mock_delete() as m: + sharegroup = ImageShareGroup(self.client, 1234) + sharegroup.delete() + + self.assertEqual(m.call_url, "/images/sharegroups/1234") + + def test_add_images_to_sharegroup(self): + """ + Tests that Images can be added to an Image Share Group + """ + with self.mock_post("/images/sharegroups/1234/images") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + sharegroup.add_images( + ImageShareGroupImagesToAdd( + images=[ + ImageShareGroupImageToAdd(id="private/123"), + ] + ) + ) + + self.assertEqual(m.call_url, "/images/sharegroups/1234/images") + self.assertEqual( + m.call_data, + { + "images": [ + {"id": "private/123"}, + ] + }, + ) + + def test_get_image_shares_in_sharegroup(self): + """ + Tests that Image Shares in an Image Share Group can be retrieved + """ + with 
self.mock_get("/images/sharegroups/1234/images") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + images = sharegroup.get_image_shares() + + self.assertEqual(m.call_url, "/images/sharegroups/1234/images") + self.assertEqual(len(images), 1) + self.assertEqual(images[0].id, "shared/1") + + def test_update_image_in_sharegroup(self): + """ + Tests that an Image shared in an Image Share Group can be updated + """ + with self.mock_put("/images/sharegroups/1234/images/shared/1") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + sharegroup.update_image_share( + ImageShareGroupImageToUpdate(image_share_id="shared/1") + ) + + self.assertEqual( + m.call_url, "/images/sharegroups/1234/images/shared/1" + ) + self.assertEqual( + m.call_data, + { + "image_share_id": "shared/1", + }, + ) + + def test_remove_image_from_sharegroup(self): + """ + Tests that an Image can be removed from an Image Share Group + """ + with self.mock_delete() as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + sharegroup.revoke_image_share("shared/1") + + self.assertEqual( + m.call_url, "/images/sharegroups/1234/images/shared/1" + ) + + def test_add_members_to_sharegroup(self): + """ + Tests that members can be added to an Image Share Group + """ + with self.mock_post("/images/sharegroups/1234/members") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + sharegroup.add_member( + ImageShareGroupMemberToAdd( + token="secrettoken", + label="New Member", + ) + ) + + self.assertEqual(m.call_url, "/images/sharegroups/1234/members") + self.assertEqual( + m.call_data, + { + "token": "secrettoken", + "label": "New Member", + }, + ) + + def test_get_members_in_sharegroup(self): + """ + Tests that members in an Image Share Group can be retrieved + """ + with self.mock_get("/images/sharegroups/1234/members") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + members = sharegroup.get_members() + + self.assertEqual(m.call_url, 
"/images/sharegroups/1234/members") + self.assertEqual(len(members), 1) + self.assertEqual( + members[0].token_uuid, "4591075e-4ba8-43c9-a521-928c3d4a135d" + ) + + def test_get_member_in_sharegroup(self): + """ + Tests that a specific member in an Image Share Group can be retrieved + """ + with self.mock_get("/images/sharegroups/1234/members/abc123") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + member = sharegroup.get_member("abc123") + + self.assertEqual( + m.call_url, "/images/sharegroups/1234/members/abc123" + ) + self.assertEqual(member.token_uuid, "abc123") + + def test_update_member_in_sharegroup(self): + """ + Tests that a member in an Image Share Group can be updated + """ + with self.mock_put("/images/sharegroups/1234/members/abc123") as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + sharegroup.update_member( + ImageShareGroupMemberToUpdate( + token_uuid="abc123", + label="Updated Member Label", + ) + ) + + self.assertEqual( + m.call_url, "/images/sharegroups/1234/members/abc123" + ) + self.assertEqual( + m.call_data, + { + "label": "Updated Member Label", + }, + ) + + def test_remove_member_from_sharegroup(self): + """ + Tests that a member can be removed from an Image Share Group + """ + with self.mock_delete() as m: + sharegroup = self.client.load(ImageShareGroup, 1234) + sharegroup.remove_member("abc123") + + self.assertEqual( + m.call_url, "/images/sharegroups/1234/members/abc123" + ) + + +class ImageShareGroupTokenTest(ClientBaseCase): + """ + Tests the methods of ImageShareGroupToken class + """ + + def test_get_sharegroup_token(self): + """ + Tests that an Image Share Group Token is loaded correctly by UUID + """ + token = self.client.load(ImageShareGroupToken, "abc123") + + self.assertEqual(token.token_uuid, "abc123") + self.assertEqual(token.label, "My Sharegroup Token") + self.assertEqual(token.sharegroup_label, "A Sharegroup") + self.assertEqual( + token.sharegroup_uuid, "e1d0e58b-f89f-4237-84ab-b82077342359" + ) 
+ self.assertEqual(token.status, "active") + self.assertEqual( + token.valid_for_sharegroup_uuid, + "e1d0e58b-f89f-4237-84ab-b82077342359", + ) + + def test_update_sharegroup_token(self): + """ + Tests that an Image Share Group Token can be updated + """ + with self.mock_put("/images/sharegroups/tokens/abc123") as m: + token = self.client.load(ImageShareGroupToken, "abc123") + token.label = "Updated Token Label" + token.save() + self.assertEqual(m.call_url, "/images/sharegroups/tokens/abc123") + self.assertEqual( + m.call_data, + { + "label": "Updated Token Label", + }, + ) + + def test_delete_sharegroup_token(self): + """ + Tests that deleting an Image Share Group Token creates the correct api request + """ + with self.mock_delete() as m: + token = ImageShareGroupToken(self.client, "abc123") + token.delete() + + self.assertEqual(m.call_url, "/images/sharegroups/tokens/abc123") + + def test_sharegroup_token_get_sharegroup(self): + """ + Tests that the Image Share Group associated with a Token can be retrieved + """ + with self.mock_get("/images/sharegroups/tokens/abc123/sharegroup") as m: + token = self.client.load(ImageShareGroupToken, "abc123") + sharegroup = token.get_sharegroup() + + self.assertEqual( + m.call_url, "/images/sharegroups/tokens/abc123/sharegroup" + ) + self.assertEqual(sharegroup.id, 1234) + + def test_sharegroup_token_get_images(self): + """ + Tests that the Images associated with a Token can be retrieved + """ + with self.mock_get( + "/images/sharegroups/tokens/abc123/sharegroup/images" + ) as m: + token = self.client.load(ImageShareGroupToken, "abc123") + images = token.get_images() + + self.assertEqual( + m.call_url, + "/images/sharegroups/tokens/abc123/sharegroup/images", + ) + self.assertEqual(len(images), 1) + self.assertEqual(images[0].id, "shared/1") diff --git a/test/unit/objects/image_test.py b/test/unit/objects/image_test.py new file mode 100644 index 000000000..1ea2fd66e --- /dev/null +++ b/test/unit/objects/image_test.py @@ -0,0 
+1,134 @@ +from datetime import datetime +from io import BytesIO +from test.unit.base import ClientBaseCase +from typing import BinaryIO, Optional +from unittest.mock import patch + +from linode_api4.objects import Image, Region + +# A minimal gzipped image that will be accepted by the API +TEST_IMAGE_CONTENT = ( + b"\x1f\x8b\x08\x08\xbd\x5c\x91\x60\x00\x03\x74\x65\x73\x74\x2e\x69" + b"\x6d\x67\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00" +) + + +class ImageTest(ClientBaseCase): + """ + Tests methods of the Image class + """ + + def test_get_image(self): + """ + Tests that an image is loaded correctly by ID + """ + image = Image(self.client, "linode/debian9") + self.assertEqual(image._populated, False) + + self.assertEqual(image.label, "Debian 9") + self.assertEqual(image._populated, True) + + self.assertEqual(image.vendor, "Debian") + self.assertEqual(image.description, None) + self.assertEqual(image.deprecated, False) + self.assertEqual(image.status, "available") + self.assertEqual(image.type, "manual") + self.assertEqual(image.created_by, "linode") + self.assertEqual(image.size, 1100) + + self.assertEqual( + image.eol, + datetime(year=2026, month=7, day=1, hour=4, minute=0, second=0), + ) + + self.assertEqual( + image.expiry, + datetime(year=2026, month=8, day=1, hour=4, minute=0, second=0), + ) + + self.assertEqual( + image.updated, + datetime(year=2020, month=7, day=1, hour=4, minute=0, second=0), + ) + + self.assertEqual(image.tags[0], "tests") + self.assertEqual(image.total_size, 1100) + self.assertEqual(image.regions[0].region, "us-east") + self.assertEqual(image.regions[0].status, "available") + self.assertEqual(image.is_shared, False) + self.assertIsNone(image.image_sharing) + + def test_image_create_upload(self): + """ + Test that an image upload URL can be created successfully. 
+ """ + + with self.mock_post("/images/upload") as m: + image, url = self.client.image_create_upload( + "Realest Image Upload", + "us-southeast", + description="very real image upload.", + tags=["test_tag", "test2"], + ) + + self.assertEqual(m.call_url, "/images/upload") + self.assertEqual(m.method, "post") + self.assertEqual( + m.call_data, + { + "label": "Realest Image Upload", + "region": "us-southeast", + "description": "very real image upload.", + "tags": ["test_tag", "test2"], + }, + ) + + self.assertEqual(image.id, "private/1337") + self.assertEqual(image.label, "Realest Image Upload") + self.assertEqual(image.description, "very real image upload.") + self.assertEqual(image.capabilities[0], "cloud-init") + self.assertEqual(image.tags[0], "test_tag") + self.assertEqual(image.tags[1], "test2") + + self.assertEqual(url, "https://linode.com/") + + def test_image_upload(self): + """ + Test that an image can be uploaded. + """ + + def put_mock(url: str, data: Optional[BinaryIO] = None, **kwargs): + self.assertEqual(url, "https://linode.com/") + self.assertEqual(data.read(), TEST_IMAGE_CONTENT) + + with patch("requests.put", put_mock), self.mock_post("/images/upload"): + image = self.client.image_upload( + "Realest Image Upload", + "us-southeast", + BytesIO(TEST_IMAGE_CONTENT), + description="very real image upload.", + tags=["test_tag", "test2"], + ) + + self.assertEqual(image.id, "private/1337") + self.assertEqual(image.label, "Realest Image Upload") + self.assertEqual(image.description, "very real image upload.") + self.assertEqual(image.tags[0], "test_tag") + self.assertEqual(image.tags[1], "test2") + + def test_image_replication(self): + """ + Test that image can be replicated. 
+ """ + + replication_url = "/images/private/123/regions" + regions = ["us-east", Region(self.client, "us-west")] + with self.mock_post(replication_url) as m: + image = Image(self.client, "private/123") + image.replicate(regions) + + self.assertEqual(replication_url, m.call_url) + self.assertEqual( + m.call_data, + {"regions": ["us-east", "us-west"]}, + ) diff --git a/test/unit/objects/linode_interface_test.py b/test/unit/objects/linode_interface_test.py new file mode 100644 index 000000000..c021334e1 --- /dev/null +++ b/test/unit/objects/linode_interface_test.py @@ -0,0 +1,332 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4 import ( + LinodeInterface, + LinodeInterfaceDefaultRouteOptions, + LinodeInterfaceOptions, + LinodeInterfacePublicIPv4AddressOptions, + LinodeInterfacePublicIPv4Options, + LinodeInterfacePublicIPv6Options, + LinodeInterfacePublicIPv6RangeOptions, + LinodeInterfacePublicOptions, + LinodeInterfaceVLANOptions, + LinodeInterfaceVPCIPv4AddressOptions, + LinodeInterfaceVPCIPv4Options, + LinodeInterfaceVPCIPv4RangeOptions, + LinodeInterfaceVPCIPv6SLAACOptions, + LinodeInterfaceVPCOptions, +) + + +def build_interface_options_public(): + return LinodeInterfaceOptions( + firewall_id=123, + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True, + ipv6=True, + ), + public=LinodeInterfacePublicOptions( + ipv4=LinodeInterfacePublicIPv4Options( + addresses=[ + LinodeInterfacePublicIPv4AddressOptions( + address="172.30.0.50", primary=True + ) + ], + ), + ipv6=LinodeInterfacePublicIPv6Options( + ranges=[ + LinodeInterfacePublicIPv6RangeOptions( + range="2600:3c09:e001:59::/64" + ) + ] + ), + ), + ) + + +def build_interface_options_vpc(): + return LinodeInterfaceOptions( + firewall_id=123, + default_route=LinodeInterfaceDefaultRouteOptions( + ipv4=True, + ), + vpc=LinodeInterfaceVPCOptions( + subnet_id=123, + ipv4=LinodeInterfaceVPCIPv4Options( + addresses=[ + LinodeInterfaceVPCIPv4AddressOptions( + 
address="192.168.22.3", + primary=True, + nat_1_1_address="any", + ) + ], + ranges=[ + LinodeInterfaceVPCIPv4RangeOptions(range="192.168.22.16/28") + ], + ), + ), + ) + + +def build_interface_options_vlan(): + return LinodeInterfaceOptions( + vlan=LinodeInterfaceVLANOptions( + vlan_label="my_vlan", ipam_address="10.0.0.1/24" + ), + ) + + +class LinodeInterfaceTest(ClientBaseCase): + """ + Tests methods of the LinodeInterface class + """ + + @staticmethod + def assert_linode_124_interface_123(iface: LinodeInterface): + assert iface.id == 123 + + assert isinstance(iface.created, datetime) + assert isinstance(iface.updated, datetime) + + assert iface.default_route.ipv4 + assert iface.default_route.ipv6 + + assert iface.mac_address == "22:00:AB:CD:EF:01" + assert iface.version == 1 + + assert iface.vlan is None + assert iface.vpc is None + + # public.ipv4 assertions + assert iface.public.ipv4.addresses[0].address == "172.30.0.50" + assert iface.public.ipv4.addresses[0].primary + + assert iface.public.ipv4.shared[0].address == "172.30.0.51" + assert iface.public.ipv4.shared[0].linode_id == 125 + + # public.ipv6 assertions + assert iface.public.ipv6.ranges[0].range == "2600:3c09:e001:59::/64" + assert ( + iface.public.ipv6.ranges[0].route_target + == "2600:3c09::ff:feab:cdef" + ) + + assert iface.public.ipv6.ranges[1].range == "2600:3c09:e001:5a::/64" + assert ( + iface.public.ipv6.ranges[1].route_target + == "2600:3c09::ff:feab:cdef" + ) + + assert iface.public.ipv6.shared[0].range == "2600:3c09:e001:2a::/64" + assert iface.public.ipv6.shared[0].route_target is None + + assert iface.public.ipv6.slaac[0].address == "2600:3c09::ff:feab:cdef" + assert iface.public.ipv6.slaac[0].prefix == 64 + + @staticmethod + def assert_linode_124_interface_456(iface: LinodeInterface): + assert iface.id == 456 + + assert isinstance(iface.created, datetime) + assert isinstance(iface.updated, datetime) + + assert iface.default_route.ipv4 + assert not iface.default_route.ipv6 + + assert 
iface.mac_address == "22:00:AB:CD:EF:01" + assert iface.version == 1 + + assert iface.vlan is None + assert iface.public is None + + # vpc assertions + assert iface.vpc.vpc_id == 123456 + assert iface.vpc.subnet_id == 789 + + assert iface.vpc.ipv4.addresses[0].address == "192.168.22.3" + assert iface.vpc.ipv4.addresses[0].primary + + assert iface.vpc.ipv4.ranges[0].range == "192.168.22.16/28" + assert iface.vpc.ipv4.ranges[1].range == "192.168.22.32/28" + + assert iface.vpc.ipv6.is_public + + assert iface.vpc.ipv6.slaac[0].range == "1234::/64" + assert iface.vpc.ipv6.slaac[0].address == "1234::5678" + + assert iface.vpc.ipv6.ranges[0].range == "4321::/64" + + @staticmethod + def assert_linode_124_interface_789(iface: LinodeInterface): + assert iface.id == 789 + + assert isinstance(iface.created, datetime) + assert isinstance(iface.updated, datetime) + + assert iface.default_route.ipv4 is None + assert iface.default_route.ipv6 is None + + assert iface.mac_address == "22:00:AB:CD:EF:01" + assert iface.version == 1 + + assert iface.public is None + assert iface.vpc is None + + # vlan assertions + assert iface.vlan.vlan_label == "my_vlan" + assert iface.vlan.ipam_address == "10.0.0.1/24" + + def test_get_public(self): + iface = LinodeInterface(self.client, 123, 124) + + self.assert_linode_124_interface_123(iface) + iface.invalidate() + self.assert_linode_124_interface_123(iface) + + def test_get_vpc(self): + iface = LinodeInterface(self.client, 456, 124) + + self.assert_linode_124_interface_456(iface) + iface.invalidate() + self.assert_linode_124_interface_456(iface) + + def test_get_vlan(self): + iface = LinodeInterface(self.client, 789, 124) + + self.assert_linode_124_interface_789(iface) + iface.invalidate() + self.assert_linode_124_interface_789(iface) + + def test_update_public(self): + iface = LinodeInterface(self.client, 123, 124) + + self.assert_linode_124_interface_123(iface) + + iface.default_route.ipv4 = False + iface.default_route.ipv6 = False + + 
iface.public.ipv4.addresses = [ + LinodeInterfacePublicIPv4AddressOptions( + address="172.30.0.51", + primary=False, + ) + ] + + iface.public.ipv6.ranges = [ + LinodeInterfacePublicIPv6RangeOptions( + range="2600:3c09:e001:58::/64" + ) + ] + + with self.mock_put("/linode/instances/124/interfaces/123") as m: + iface.save() + + assert m.called + + assert m.call_data == { + "default_route": { + "ipv4": False, + "ipv6": False, + }, + "public": { + "ipv4": { + "addresses": [ + { + "address": "172.30.0.51", + "primary": False, + }, + ] + }, + "ipv6": { + "ranges": [ + { + "range": "2600:3c09:e001:58::/64", + } + ] + }, + }, + } + + def test_update_vpc(self): + iface = LinodeInterface(self.client, 456, 124) + + self.assert_linode_124_interface_456(iface) + + iface.default_route.ipv4 = False + + iface.vpc.subnet_id = 456 + + iface.vpc.ipv4.addresses = [ + LinodeInterfaceVPCIPv4AddressOptions( + address="192.168.22.4", primary=False, nat_1_1_address="auto" + ) + ] + + iface.vpc.ipv4.ranges = [ + LinodeInterfaceVPCIPv4RangeOptions( + range="192.168.22.17/28", + ) + ] + + iface.vpc.ipv6.is_public = False + + iface.vpc.ipv6.slaac = [ + LinodeInterfaceVPCIPv6SLAACOptions( + range="1233::/64", + ) + ] + + iface.vpc.ipv6.ranges = [ + LinodeInterfacePublicIPv6RangeOptions(range="9876::/64") + ] + + with self.mock_put("/linode/instances/124/interfaces/456") as m: + iface.save() + + assert m.called + + assert m.call_data == { + "default_route": { + "ipv4": False, + }, + "vpc": { + "subnet_id": 456, + "ipv4": { + "addresses": [ + { + "address": "192.168.22.4", + "primary": False, + "nat_1_1_address": "auto", + }, + ], + "ranges": [{"range": "192.168.22.17/28"}], + }, + "ipv6": { + "is_public": False, + "slaac": [{"range": "1233::/64"}], + "ranges": [{"range": "9876::/64"}], + }, + }, + } + + def test_delete(self): + iface = LinodeInterface(self.client, 123, 124) + + with self.mock_delete() as m: + iface.delete() + assert m.called + + def test_firewalls(self): + iface = 
LinodeInterface(self.client, 123, 124) + + firewalls = iface.firewalls() + + assert len(firewalls) == 1 + + assert firewalls[0].id == 123 + + # Check a few fields to make sure the Firewall object was populated + assert firewalls[0].label == "firewall123" + assert firewalls[0].rules.inbound[0].action == "ACCEPT" + assert firewalls[0].status == "enabled" diff --git a/test/unit/objects/linode_test.py b/test/unit/objects/linode_test.py new file mode 100644 index 000000000..40bbb5069 --- /dev/null +++ b/test/unit/objects/linode_test.py @@ -0,0 +1,1130 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase +from test.unit.objects.linode_interface_test import ( + LinodeInterfaceTest, + build_interface_options_public, + build_interface_options_vlan, + build_interface_options_vpc, +) + +from linode_api4 import ( + InstanceDiskEncryptionType, + InterfaceGeneration, + NetworkInterface, +) +from linode_api4.objects import ( + Config, + ConfigInterface, + ConfigInterfaceIPv4, + ConfigInterfaceIPv6, + ConfigInterfaceIPv6Options, + ConfigInterfaceIPv6Range, + ConfigInterfaceIPv6RangeOptions, + ConfigInterfaceIPv6SLAAC, + ConfigInterfaceIPv6SLAACOptions, + Disk, + Image, + Instance, + StackScript, + Type, + VPCSubnet, +) + + +class LinodeTest(ClientBaseCase): + """ + Tests methods of the Linode class + """ + + def test_get_linode(self): + """ + Tests that a client is loaded correctly by ID + """ + linode = Instance(self.client, 123) + self.assertEqual(linode._populated, False) + + self.assertEqual(linode.label, "linode123") + self.assertEqual(linode.group, "test") + + self.assertTrue(isinstance(linode.image, Image)) + self.assertEqual(linode.image.label, "Ubuntu 17.04") + self.assertEqual( + linode.host_uuid, "3a3ddd59d9a78bb8de041391075df44de62bfec8" + ) + self.assertEqual(linode.watchdog_enabled, True) + self.assertEqual( + linode.disk_encryption, InstanceDiskEncryptionType.disabled + ) + self.assertEqual(linode.lke_cluster_id, None) + 
self.assertEqual(linode.maintenance_policy, "linode/migrate") + + json = linode._raw_json + self.assertIsNotNone(json) + self.assertEqual(json["id"], 123) + self.assertEqual(json["label"], "linode123") + self.assertEqual(json["group"], "test") + + # test that the _raw_json stored on the object is sufficient to populate + # a new object + linode2 = Instance(self.client, json["id"], json=json) + + self.assertTrue(linode2._populated) + self.assertEqual(linode2.id, linode.id) + self.assertEqual(linode2.label, linode.label) + self.assertEqual(linode2.group, linode.group) + self.assertEqual(linode2._raw_json, linode._raw_json) + + def test_transfer(self): + """ + Tests that you can get transfer + """ + linode = Instance(self.client, 123) + + transfer = linode.transfer + + self.assertEqual(transfer.quota, 471) + self.assertEqual(transfer.billable, 0) + self.assertEqual(transfer.used, 10369075) + + def test_rebuild(self): + """ + Tests that you can rebuild with an image + """ + linode = Instance(self.client, 123) + + with self.mock_post("/linode/instances/123") as m: + pw = linode.rebuild( + "linode/debian9", + disk_encryption=InstanceDiskEncryptionType.enabled, + ) + + self.assertIsNotNone(pw) + self.assertTrue(isinstance(pw, str)) + + self.assertEqual(m.call_url, "/linode/instances/123/rebuild") + + self.assertEqual( + m.call_data, + { + "image": "linode/debian9", + "root_pass": pw, + "disk_encryption": "enabled", + }, + ) + + def test_available_backups(self): + """ + Tests that a Linode can retrieve its own backups + """ + linode = Instance(self.client, 123) + + backups = linode.available_backups + + # assert we got the correct number of automatic backups + self.assertEqual(len(backups.automatic), 3) + + # examine one automatic backup + b = backups.automatic[0] + self.assertEqual(b.id, 12345) + self.assertEqual(b._populated, True) + self.assertEqual(b.status, "successful") + self.assertEqual(b.type, "auto") + self.assertEqual( + b.created, + datetime(year=2018, month=1, 
day=9, hour=0, minute=1, second=1), + ) + self.assertEqual( + b.updated, + datetime(year=2018, month=1, day=9, hour=0, minute=1, second=1), + ) + self.assertEqual( + b.finished, + datetime(year=2018, month=1, day=9, hour=0, minute=1, second=1), + ) + self.assertEqual(b.region.id, "us-east-1a") + self.assertEqual(b.label, None) + self.assertEqual(b.message, None) + self.assertEqual(b.available, True) + + self.assertEqual(len(b.disks), 2) + self.assertEqual(b.disks[0].size, 1024) + self.assertEqual(b.disks[0].label, "Debian 8.1 Disk") + self.assertEqual(b.disks[0].filesystem, "ext4") + self.assertEqual(b.disks[1].size, 0) + self.assertEqual(b.disks[1].label, "256MB Swap Image") + self.assertEqual(b.disks[1].filesystem, "swap") + + self.assertEqual(len(b.configs), 1) + self.assertEqual(b.configs[0], "My Debian 8.1 Profile") + + # assert that snapshots came back as expected + self.assertEqual(backups.snapshot.current, None) + self.assertEqual(backups.snapshot.in_progress, None) + + def test_update_linode(self): + """ + Tests that a Linode can be updated + """ + with self.mock_put("linode/instances/123") as m: + linode = self.client.load(Instance, 123) + + linode.label = "NewLinodeLabel" + linode.group = "new_group" + linode.maintenance_policy = "linode/power_off_on" + linode.save() + + self.assertEqual(m.call_url, "/linode/instances/123") + self.assertEqual( + m.call_data, + { + "alerts": { + "cpu": 90, + "io": 5000, + "network_in": 5, + "network_out": 5, + "transfer_quota": 80, + }, + "backups": { + "enabled": True, + "schedule": {"day": "Scheduling", "window": "W02"}, + }, + "label": "NewLinodeLabel", + "group": "new_group", + "tags": ["something"], + "watchdog_enabled": True, + "maintenance_policy": "linode/power_off_on", + }, + ) + + def test_delete_linode(self): + """ + Tests that deleting a Linode creates the correct api request + """ + with self.mock_delete() as m: + linode = Instance(self.client, 123) + linode.delete() + + self.assertEqual(m.call_url, 
"/linode/instances/123") + + def test_reboot(self): + """ + Tests that you can submit a correct reboot api request + """ + linode = Instance(self.client, 123) + result = {} + + with self.mock_post(result) as m: + linode.reboot() + self.assertEqual(m.call_url, "/linode/instances/123/reboot") + + def test_shutdown(self): + """ + Tests that you can submit a correct shutdown api request + """ + linode = Instance(self.client, 123) + result = {} + + with self.mock_post(result) as m: + linode.shutdown() + self.assertEqual(m.call_url, "/linode/instances/123/shutdown") + + def test_boot(self): + """ + Tests that you can submit a correct boot api request + """ + linode = Instance(self.client, 123) + result = {} + + with self.mock_post(result) as m: + linode.boot() + self.assertEqual(m.call_url, "/linode/instances/123/boot") + + def test_resize(self): + """ + Tests that you can submit a correct resize api request + """ + linode = Instance(self.client, 123) + result = {} + + with self.mock_post(result) as m: + linode.resize(new_type="g6-standard-1") + self.assertEqual(m.call_url, "/linode/instances/123/resize") + self.assertEqual(m.call_data["type"], "g6-standard-1") + + def test_resize_with_class(self): + """ + Tests that you can submit a correct resize api request with a Base class type + """ + linode = Instance(self.client, 123) + ltype = Type(self.client, "g6-standard-2") + result = {} + + with self.mock_post(result) as m: + linode.resize(new_type=ltype) + self.assertEqual(m.call_url, "/linode/instances/123/resize") + self.assertEqual(m.call_data["type"], "g6-standard-2") + + def test_boot_with_config(self): + """ + Tests that you can submit a correct boot with a config api request + """ + linode = Instance(self.client, 123) + config = linode.configs[0] + result = {} + + with self.mock_post(result) as m: + linode.boot(config=config) + self.assertEqual(m.call_url, "/linode/instances/123/boot") + + def test_mutate(self): + """ + Tests that you can submit a correct mutate api 
request + """ + linode = Instance(self.client, 123) + result = {} + + with self.mock_post(result) as m: + linode.mutate() + self.assertEqual(m.call_url, "/linode/instances/123/mutate") + self.assertEqual(m.call_data["allow_auto_disk_resize"], True) + + def test_firewalls(self): + """ + Tests that you can submit a correct firewalls api request + """ + linode = Instance(self.client, 123) + + with self.mock_get("/linode/instances/123/firewalls") as m: + result = linode.firewalls() + self.assertEqual(m.call_url, "/linode/instances/123/firewalls") + self.assertEqual(len(result), 1) + + def test_apply_firewalls(self): + """ + Tests that you can submit a correct apply firewalls api request + """ + linode = Instance(self.client, 123) + + with self.mock_post({}) as m: + result = linode.apply_firewalls() + self.assertEqual( + m.call_url, "/linode/instances/123/firewalls/apply" + ) + self.assertEqual(result, True) + + def test_volumes(self): + """ + Tests that you can submit a correct volumes api request + """ + linode = Instance(self.client, 123) + + with self.mock_get("/linode/instances/123/volumes") as m: + result = linode.volumes() + self.assertEqual(m.call_url, "/linode/instances/123/volumes") + self.assertEqual(len(result), 1) + + def test_nodebalancers(self): + """ + Tests that you can submit a correct nodebalancers api request + """ + linode = Instance(self.client, 123) + + with self.mock_get("/linode/instances/123/nodebalancers") as m: + result = linode.nodebalancers() + self.assertEqual(m.call_url, "/linode/instances/123/nodebalancers") + self.assertEqual(len(result), 1) + + def test_transfer_year_month(self): + """ + Tests that you can submit a correct transfer api request + """ + linode = Instance(self.client, 123) + + with self.mock_get("/linode/instances/123/transfer/2023/4") as m: + linode.transfer_year_month(2023, 4) + self.assertEqual( + m.call_url, "/linode/instances/123/transfer/2023/4" + ) + + def test_lke_cluster(self): + """ + Tests that you can grab the 
parent LKE cluster from an instance node + """ + linode = Instance(self.client, 456) + + assert linode.lke_cluster_id == 18881 + assert linode.lke_cluster.id == linode.lke_cluster_id + + def test_duplicate(self): + """ + Tests that you can submit a correct disk clone api request + """ + disk = Disk(self.client, 12345, 123) + + with self.mock_post("/linode/instances/123/disks/12345/clone") as m: + disk.duplicate() + self.assertEqual( + m.call_url, "/linode/instances/123/disks/12345/clone" + ) + + assert disk.disk_encryption == InstanceDiskEncryptionType.disabled + + def test_disk_password(self): + """ + Tests that you can submit a correct disk password reset api request + """ + disk = Disk(self.client, 12345, 123) + + with self.mock_post({}) as m: + disk.reset_root_password() + self.assertEqual( + m.call_url, "/linode/instances/123/disks/12345/password" + ) + + def test_instance_password(self): + """ + Tests that you can submit a correct instance password reset api request + """ + instance = Instance(self.client, 123) + + with self.mock_post({}) as m: + instance.reset_instance_root_password() + self.assertEqual(m.call_url, "/linode/instances/123/password") + + def test_ips(self): + """ + Tests that you can submit a correct ips api request + """ + linode = Instance(self.client, 123) + + ips = linode.ips + + assert ips.ipv4 is not None + assert ips.ipv6 is not None + assert ips.ipv4.public is not None + assert ips.ipv4.private is not None + assert ips.ipv4.shared is not None + assert ips.ipv4.reserved is not None + assert ips.ipv4.vpc is not None + assert ips.ipv6.slaac is not None + assert ips.ipv6.link_local is not None + assert ips.ipv6.ranges is not None + + vpc_ip = ips.ipv4.vpc[0] + assert vpc_ip.nat_1_1 == "172.233.179.133" + assert vpc_ip.address_range == None + assert vpc_ip.vpc_id == 39246 + assert vpc_ip.subnet_id == 39388 + assert vpc_ip.config_id == 59036295 + assert vpc_ip.interface_id == 1186165 + assert vpc_ip.active + + def 
test_initiate_migration(self): + """ + Tests that you can initiate a pending migration + """ + linode = Instance(self.client, 123) + result = {} + + with self.mock_post(result) as m: + linode.initiate_migration() + self.assertEqual(m.call_url, "/linode/instances/123/migrate") + + def test_create_disk(self): + """ + Tests that disk_create behaves as expected + """ + linode = Instance(self.client, 123) + + with self.mock_post("/linode/instances/123/disks/12345") as m: + disk, gen_pass = linode.disk_create( + 1234, + label="test", + authorized_users=["test"], + image="linode/debian12", + ) + self.assertEqual(m.call_url, "/linode/instances/123/disks") + self.assertEqual( + m.call_data, + { + "size": 1234, + "label": "test", + "root_pass": gen_pass, + "image": "linode/debian12", + "authorized_users": ["test"], + "read_only": False, + }, + ) + + assert disk.id == 12345 + assert disk.disk_encryption == InstanceDiskEncryptionType.disabled + + def test_get_placement_group(self): + """ + Tests that you can get the placement group for a Linode + """ + linode = Instance(self.client, 123) + + pg = linode.placement_group + + assert pg.id == 123 + assert pg.label == "test" + assert pg.placement_group_type == "anti_affinity:local" + + # Invalidate the instance and try again + # This makes sure the implicit refresh/cache logic works + # as expected + linode.invalidate() + + pg = linode.placement_group + + assert pg.id == 123 + assert pg.label == "test" + assert pg.placement_group_type == "anti_affinity:local" + + def test_get_interfaces(self): + # Local import to avoid circular dependency + from linode_interface_test import ( # pylint: disable=import-outside-toplevel + LinodeInterfaceTest, + ) + + instance = Instance(self.client, 124) + + assert instance.interface_generation == InterfaceGeneration.LINODE + + interfaces = instance.linode_interfaces + + LinodeInterfaceTest.assert_linode_124_interface_123( + next(iface for iface in interfaces if iface.id == 123) + ) + + 
LinodeInterfaceTest.assert_linode_124_interface_456( + next(iface for iface in interfaces if iface.id == 456) + ) + + LinodeInterfaceTest.assert_linode_124_interface_789( + next(iface for iface in interfaces if iface.id == 789) + ) + + def test_get_interfaces_settings(self): + instance = Instance(self.client, 124) + iface_settings = instance.interfaces_settings + + assert iface_settings.network_helper + + assert iface_settings.default_route.ipv4_interface_id == 123 + assert iface_settings.default_route.ipv4_eligible_interface_ids == [ + 123, + 456, + 789, + ] + + assert iface_settings.default_route.ipv6_interface_id == 456 + assert iface_settings.default_route.ipv6_eligible_interface_ids == [ + 123, + 456, + ] + + def test_update_interfaces_settings(self): + instance = Instance(self.client, 124) + iface_settings = instance.interfaces_settings + + iface_settings.network_helper = False + iface_settings.default_route.ipv4_interface_id = 456 + iface_settings.default_route.ipv6_interface_id = 123 + + with self.mock_put("/linode/instances/124/interfaces/settings") as m: + iface_settings.save() + + assert m.call_data == { + "network_helper": False, + "default_route": { + "ipv4_interface_id": 456, + "ipv6_interface_id": 123, + }, + } + + def test_upgrade_interfaces(self): + # Local import to avoid circular dependency + from linode_interface_test import ( # pylint: disable=import-outside-toplevel + LinodeInterfaceTest, + ) + + instance = Instance(self.client, 124) + + with self.mock_post("/linode/instances/124/upgrade-interfaces") as m: + result = instance.upgrade_interfaces(123) + + assert m.called + assert m.call_data == {"config_id": 123, "dry_run": False} + + assert result.config_id == 123 + assert result.dry_run + + LinodeInterfaceTest.assert_linode_124_interface_123( + result.interfaces[0] + ) + LinodeInterfaceTest.assert_linode_124_interface_456( + result.interfaces[1] + ) + LinodeInterfaceTest.assert_linode_124_interface_789( + result.interfaces[2] + ) + + def 
test_upgrade_interfaces_dry(self): + instance = Instance(self.client, 124) + + with self.mock_post("/linode/instances/124/upgrade-interfaces") as m: + result = instance.upgrade_interfaces(123, dry_run=True) + + assert m.called + assert m.call_data == { + "config_id": 123, + "dry_run": True, + } + + assert result.config_id == 123 + assert result.dry_run + + # We don't use the assertion helpers here because dry runs return + # a MappedObject. + assert result.interfaces[0].id == 123 + assert result.interfaces[0].public is not None + + assert result.interfaces[1].id == 456 + assert result.interfaces[1].vpc is not None + + assert result.interfaces[2].id == 789 + assert result.interfaces[2].vlan is not None + + def test_create_interface_public(self): + instance = Instance(self.client, 124) + + iface = build_interface_options_public() + + with self.mock_post("/linode/instances/124/interfaces/123") as m: + result = instance.interface_create(**vars(iface)) + + assert m.call_data == { + "firewall_id": iface.firewall_id, + "default_route": iface.default_route._serialize(), + "public": iface.public._serialize(), + } + + LinodeInterfaceTest.assert_linode_124_interface_123(result) + + def test_create_interface_vpc(self): + instance = Instance(self.client, 124) + + iface = build_interface_options_vpc() + + with self.mock_post("/linode/instances/124/interfaces/456") as m: + result = instance.interface_create(**vars(iface)) + + assert m.call_data == { + "firewall_id": iface.firewall_id, + "default_route": iface.default_route._serialize(), + "vpc": iface.vpc._serialize(), + } + + LinodeInterfaceTest.assert_linode_124_interface_456(result) + + def test_create_interface_vlan(self): + instance = Instance(self.client, 124) + + iface = build_interface_options_vlan() + + with self.mock_post("/linode/instances/124/interfaces/789") as m: + result = instance.interface_create(**vars(iface)) + + assert m.call_data == {"vlan": iface.vlan._serialize()} + + 
LinodeInterfaceTest.assert_linode_124_interface_789(result) + + +class DiskTest(ClientBaseCase): + """ + Tests for the Disk object + """ + + def test_resize(self): + """ + Tests that a resize is submitted correctly + """ + disk = Disk(self.client, 12345, 123) + + with self.mock_post({}) as m: + r = disk.resize(1000) + + self.assertTrue(r) + + self.assertEqual( + m.call_url, "/linode/instances/123/disks/12345/resize" + ) + self.assertEqual(m.call_data, {"size": 1000}) + + +class ConfigTest(ClientBaseCase): + """ + Tests for the Config object + """ + + def test_update_interfaces(self): + """ + Tests that a configs interfaces update correctly + """ + + json = self.client.get("/linode/instances/123/configs/456789") + config = Config(self.client, 456789, 123, json=json) + + with self.mock_put("/linode/instances/123/configs/456789") as m: + new_interfaces = [ + {"purpose": "public", "primary": True}, + ConfigInterface("vlan", label="cool-vlan"), + ConfigInterface( + "vpc", + vpc_id=18881, + subnet_id=123, + ipv4=ConfigInterfaceIPv4(vpc="10.0.0.4", nat_1_1="any"), + ipv6=ConfigInterfaceIPv6( + slaac=[ + ConfigInterfaceIPv6SLAAC( + range="1234::5678/64", address="1234::5678" + ) + ], + ranges=[ + ConfigInterfaceIPv6Range(range="1234::5678/64") + ], + is_public=True, + ), + ), + ] + + config.interfaces = new_interfaces + + config.save() + + assert m.call_url == "/linode/instances/123/configs/456789" + assert m.call_data.get("interfaces") == [ + { + "purpose": "public", + "primary": True, + }, + { + "purpose": "vlan", + "label": "cool-vlan", + }, + { + "purpose": "vpc", + "subnet_id": 123, + "ipv4": { + "vpc": "10.0.0.4", + "nat_1_1": "any", + }, + "ipv6": { + "slaac": [ + { + "range": "1234::5678/64", + # NOTE: Address is read-only so it shouldn't be specified here + } + ], + "ranges": [ + { + "range": "1234::5678/64", + } + ], + "is_public": True, + }, + }, + ] + + def test_get_config(self): + json = self.client.get("/linode/instances/123/configs/456789") + config = 
Config(self.client, 456789, 123, json=json) + + self.assertEqual(config.root_device, "/dev/sda") + self.assertEqual(config.comments, "") + self.assertIsNotNone(config.helpers) + self.assertEqual(config.label, "My Ubuntu 17.04 LTS Profile") + self.assertEqual( + config.created, + datetime(year=2014, month=10, day=7, hour=20, minute=4, second=0), + ) + self.assertEqual(config.memory_limit, 0) + self.assertEqual(config.id, 456789) + self.assertIsNotNone(config.interfaces) + self.assertEqual(config.run_level, "default") + self.assertIsNone(config.initrd) + self.assertEqual(config.virt_mode, "paravirt") + self.assertIsNotNone(config.devices) + + def test_interface_ipv4(self): + json = {"vpc": "10.0.0.1", "nat_1_1": "any"} + + ipv4 = ConfigInterfaceIPv4.from_json(json) + + self.assertEqual(ipv4.vpc, "10.0.0.1") + self.assertEqual(ipv4.nat_1_1, "any") + + def test_interface_ipv6(self): + json = { + "slaac": [{"range": "1234::5678/64", "address": "1234::5678"}], + "ranges": [{"range": "1234::5678/64"}], + "is_public": True, + } + + ipv6 = ConfigInterfaceIPv6.from_json(json) + + assert len(ipv6.slaac) == 1 + assert ipv6.slaac[0].range == "1234::5678/64" + assert ipv6.slaac[0].address == "1234::5678" + + assert len(ipv6.ranges) == 1 + assert ipv6.ranges[0].range == "1234::5678/64" + + assert ipv6.is_public + + def test_config_devices_unwrap(self): + """ + Tests that config devices can be successfully converted to a dict. + """ + + inst = Instance(self.client, 123) + assert inst.configs[0].devices.dict.get("sda").get("id") == 12345 + + +class StackScriptTest(ClientBaseCase): + """ + Tests the methods of the StackScript class. 
+ """ + + def test_get_stackscript(self): + """ + Tests that a stackscript is loaded correctly by ID + """ + stackscript = StackScript(self.client, 10079) + + self.assertEqual(stackscript.id, 10079) + self.assertEqual(stackscript.deployments_active, 1) + self.assertEqual(stackscript.deployments_total, 12) + self.assertEqual(stackscript.rev_note, "Set up MySQL") + self.assertTrue(stackscript.mine) + self.assertTrue(stackscript.is_public) + self.assertIsNotNone(stackscript.user_defined_fields) + self.assertIsNotNone(stackscript.images) + + +class TypeTest(ClientBaseCase): + + def test_get_type_by_id(self): + """ + Tests that a Linode type is loaded correctly by ID + """ + t = Type(self.client, "g6-nanode-1") + self.assertEqual(t._populated, False) + + self.assertEqual(t.vcpus, 1) + self.assertEqual(t.gpus, 0) + self.assertEqual(t.label, "Linode 1024") + self.assertEqual(t.disk, 20480) + self.assertEqual(t.type_class, "nanode") + self.assertEqual(t.region_prices[0].id, "us-east") + + def test_get_type_gpu(self): + """ + Tests that gpu types load up right + """ + t = Type(self.client, "g6-gpu-2") + self.assertEqual(t._populated, False) + + self.assertEqual(t.gpus, 1) + self.assertEqual(t._populated, True) + + def test_load_type(self): + """ + Tests that a type can be loaded using LinodeClient.load(...) 
+ """ + + t = self.client.load(Type, "g6-nanode-1") + self.assertEqual(t._populated, True) + self.assertEqual(t.type_class, "nanode") + + def test_save_noforce(self): + """ + Tests that a client will only save if changes are detected + """ + linode = Instance(self.client, 123) + self.assertEqual(linode._populated, False) + + self.assertEqual(linode.label, "linode123") + self.assertEqual(linode.group, "test") + + assert not linode._changed + + with self.mock_put("linode/instances") as m: + linode.save(force=False) + assert not m.called + + linode.label = "blah" + assert linode._changed + + with self.mock_put("linode/instances") as m: + linode.save(force=False) + assert m.called + assert m.call_url == "/linode/instances/123" + assert m.call_data["label"] == "blah" + + assert not linode._changed + + def test_save_force(self): + """ + Tests that a client will forcibly save by default + """ + linode = Instance(self.client, 123) + self.assertEqual(linode._populated, False) + + self.assertEqual(linode.label, "linode123") + self.assertEqual(linode.group, "test") + + assert not linode._changed + + with self.mock_put("linode/instances") as m: + linode.save() + assert m.called + + +class ConfigInterfaceTest(ClientBaseCase): + def test_list(self): + config = Config(self.client, 456789, 123) + config._api_get() + assert {v.id for v in config.interfaces} == {123, 321, 456} + assert {v.purpose for v in config.interfaces} == { + "vlan", + "vpc", + "public", + } + + def test_update(self): + config = Config(self.client, 456789, 123) + config._api_get() + config.interfaces = [ + {"purpose": "public"}, + ConfigInterface( + purpose="vlan", label="cool-vlan", ipam_address="10.0.0.4/32" + ), + ] + + with self.mock_put("linode/instances/123/configs/456789") as m: + config.save() + assert m.call_url == "/linode/instances/123/configs/456789" + assert m.call_data["interfaces"] == [ + {"purpose": "public"}, + { + "purpose": "vlan", + "label": "cool-vlan", + "ipam_address": "10.0.0.4/32", + }, 
+ ] + + +class TestNetworkInterface(ClientBaseCase): + def test_create_interface_public(self): + config = Config(self.client, 456789, 123) + config._api_get() + + with self.mock_post( + "linode/instances/123/configs/456789/interfaces/456" + ) as m: + interface = config.interface_create_public(primary=True) + + assert m.called + assert ( + m.call_url == "/linode/instances/123/configs/456789/interfaces" + ) + assert m.method == "post" + assert m.call_data == {"purpose": "public", "primary": True} + + assert interface.id == 456 + assert interface.purpose == "public" + assert interface.primary + + def test_create_interface_vlan(self): + config = Config(self.client, 456789, 123) + config._api_get() + + with self.mock_post( + "linode/instances/123/configs/456789/interfaces/321" + ) as m: + interface = config.interface_create_vlan( + "test-interface", ipam_address="10.0.0.2/32" + ) + + assert m.called + assert ( + m.call_url == "/linode/instances/123/configs/456789/interfaces" + ) + assert m.method == "post" + assert m.call_data == { + "purpose": "vlan", + "label": "test-interface", + "ipam_address": "10.0.0.2/32", + } + + assert interface.id == 321 + assert interface.purpose == "vlan" + assert not interface.primary + assert interface.label == "test-interface" + assert interface.ipam_address == "10.0.0.2" + + def test_create_interface_vpc(self): + config = Config(self.client, 456789, 123) + config._api_get() + + with self.mock_post( + "linode/instances/123/configs/456789/interfaces/123" + ) as m: + interface = config.interface_create_vpc( + subnet=VPCSubnet(self.client, 789, 123456), + primary=True, + ipv4=ConfigInterfaceIPv4(vpc="10.0.0.4", nat_1_1="any"), + ipv6=ConfigInterfaceIPv6Options( + slaac=[ConfigInterfaceIPv6SLAACOptions(range="auto")], + ranges=[ConfigInterfaceIPv6RangeOptions(range="auto")], + is_public=True, + ), + ip_ranges=["10.0.0.0/24"], + ) + + assert m.called + assert ( + m.call_url == "/linode/instances/123/configs/456789/interfaces" + ) + assert 
m.method == "post" + assert m.call_data == { + "purpose": "vpc", + "primary": True, + "subnet_id": 789, + "ipv4": {"vpc": "10.0.0.4", "nat_1_1": "any"}, + "ipv6": { + "slaac": [{"range": "auto"}], + "ranges": [{"range": "auto"}], + "is_public": True, + }, + "ip_ranges": ["10.0.0.0/24"], + } + + assert interface.id == 123 + assert interface.purpose == "vpc" + assert interface.primary + assert interface.vpc.id == 123456 + assert interface.subnet.id == 789 + + assert interface.ipv4.vpc == "10.0.0.2" + assert interface.ipv4.nat_1_1 == "any" + + assert len(interface.ipv6.slaac) == 1 + assert interface.ipv6.slaac[0].range == "1234::5678/64" + assert interface.ipv6.slaac[0].address == "1234::5678" + + assert len(interface.ipv6.ranges) == 1 + assert interface.ipv6.ranges[0].range == "1234::5678/64" + + assert interface.ipv6.is_public + + assert interface.ip_ranges == ["10.0.0.0/24"] + + def test_update(self): + interface = NetworkInterface(self.client, 123, 456789, 123) + interface._api_get() + + interface.ipv4.vpc = "10.0.0.3" + interface.ipv6.is_public = False + interface.primary = False + interface.ip_ranges = ["10.0.0.2/32"] + + with self.mock_put( + "linode/instances/123/configs/456789/interfaces/123/put" + ) as m: + interface.save() + + assert m.called + assert ( + m.call_url + == "/linode/instances/123/configs/456789/interfaces/123" + ) + assert m.method == "put" + assert m.call_data == { + "primary": False, + "ipv4": {"vpc": "10.0.0.3", "nat_1_1": "any"}, + "ipv6": { + "slaac": [{"range": "1234::5678/64"}], + "ranges": [{"range": "1234::5678/64"}], + "is_public": False, + }, + "ip_ranges": ["10.0.0.2/32"], + } + + def test_get_vlan(self): + interface = NetworkInterface(self.client, 321, 456789, instance_id=123) + interface._api_get() + + self.assertEqual(interface.id, 321) + self.assertEqual(interface.ipam_address, "10.0.0.2") + self.assertEqual(interface.purpose, "vlan") + self.assertEqual(interface.label, "test-interface") + + def test_get_vpc(self): + interface 
= NetworkInterface(self.client, 123, 456789, instance_id=123) + interface._api_get() + + self.assertEqual(interface.id, 123) + self.assertEqual(interface.purpose, "vpc") + self.assertEqual(interface.vpc.id, 123456) + self.assertEqual(interface.subnet.id, 789) + + self.assertEqual(interface.ipv4.vpc, "10.0.0.2") + self.assertEqual(interface.ipv4.nat_1_1, "any") + + self.assertEqual(len(interface.ipv6.slaac), 1) + self.assertEqual(interface.ipv6.slaac[0].range, "1234::5678/64") + self.assertEqual(interface.ipv6.slaac[0].address, "1234::5678") + self.assertEqual(len(interface.ipv6.ranges), 1) + self.assertEqual(interface.ipv6.ranges[0].range, "1234::5678/64") + self.assertEqual(interface.ipv6.is_public, True) + + self.assertEqual(interface.ip_ranges, ["10.0.0.0/24"]) + self.assertEqual(interface.active, True) + + def test_list(self): + config = Config(self.client, 456789, 123) + config._api_get() + interfaces = config.network_interfaces + + assert {v.id for v in interfaces} == {123, 321, 456} + assert {v.purpose for v in interfaces} == { + "vlan", + "vpc", + "public", + } + + for v in interfaces: + assert isinstance(v, NetworkInterface) + + def test_reorder(self): + config = Config(self.client, 456789, 123) + config._api_get() + interfaces = config.network_interfaces + + with self.mock_post({}) as m: + interfaces.reverse() + # Let's make sure it supports both IDs and NetworkInterfaces + interfaces[2] = interfaces[2].id + + config.interface_reorder(interfaces) + + assert ( + m.call_url + == "/linode/instances/123/configs/456789/interfaces/order" + ) + + assert m.call_data == {"ids": [321, 123, 456]} diff --git a/test/unit/objects/lke_test.py b/test/unit/objects/lke_test.py new file mode 100644 index 000000000..91f9ed3fe --- /dev/null +++ b/test/unit/objects/lke_test.py @@ -0,0 +1,567 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase +from unittest.mock import MagicMock + +from linode_api4 import InstanceDiskEncryptionType, TieredKubeVersion 
+from linode_api4.objects import ( + LKECluster, + LKEClusterControlPlaneACLAddressesOptions, + LKEClusterControlPlaneACLOptions, + LKEClusterControlPlaneOptions, + LKENodePool, +) +from linode_api4.objects.lke import LKENodePoolNode, LKENodePoolTaint + + +class LKETest(ClientBaseCase): + """ + Tests methods of the LKE class + """ + + def test_get_cluster(self): + """ + Tests that the LKECluster object is properly generated. + """ + + cluster = LKECluster(self.client, 18881) + + self.assertEqual(cluster.id, 18881) + self.assertEqual( + cluster.created, + datetime(year=2021, month=2, day=10, hour=23, minute=54, second=21), + ) + self.assertEqual( + cluster.updated, + datetime(year=2021, month=2, day=10, hour=23, minute=54, second=21), + ) + self.assertEqual(cluster.label, "example-cluster") + self.assertEqual(cluster.tags, []) + self.assertEqual(cluster.region.id, "ap-west") + self.assertEqual(cluster.k8s_version.id, "1.19") + self.assertTrue(cluster.control_plane.high_availability) + self.assertTrue(cluster.apl_enabled) + + def test_get_pool(self): + """ + Tests that the LKENodePool object is properly generated. + """ + + pool = LKENodePool(self.client, 456, 18881) + + assert pool.id == 456 + assert pool.cluster_id == 18881 + assert pool.type.id == "g6-standard-4" + assert pool.label == "example-node-pool" + assert pool.firewall_id == 456 + assert pool.disk_encryption == InstanceDiskEncryptionType.enabled + + assert pool.disks is not None + assert pool.nodes is not None + assert pool.autoscaler is not None + assert pool.tags is not None + + assert pool.labels.foo == "bar" + assert pool.labels.bar == "foo" + + assert isinstance(pool.taints[0], LKENodePoolTaint) + assert pool.taints[0].key == "foo" + assert pool.taints[0].value == "bar" + assert pool.taints[0].effect == "NoSchedule" + + def test_cluster_dashboard_url_view(self): + """ + Tests that you can submit a correct cluster dashboard url api request. 
+ """ + cluster = LKECluster(self.client, 18881) + + with self.mock_get("/lke/clusters/18881/dashboard") as m: + result = cluster.cluster_dashboard_url_view() + self.assertEqual(m.call_url, "/lke/clusters/18881/dashboard") + self.assertEqual(result, "https://example.dashboard.linodelke.net") + + def test_kubeconfig_delete(self): + """ + Tests that you can submit a correct kubeconfig delete api request. + """ + cluster = LKECluster(self.client, 18881) + + with self.mock_delete() as m: + cluster.kubeconfig_delete() + self.assertEqual(m.call_url, "/lke/clusters/18881/kubeconfig") + + def test_node_view(self): + """ + Tests that you can submit a correct node view api request. + """ + cluster = LKECluster(self.client, 18881) + + with self.mock_get("/lke/clusters/18881/nodes/123456") as m: + node = cluster.node_view(123456) + self.assertEqual(m.call_url, "/lke/clusters/18881/nodes/123456") + self.assertIsNotNone(node) + self.assertEqual(node.id, "123456") + self.assertEqual(node.instance_id, 456) + self.assertEqual(node.status, "ready") + + def test_node_delete(self): + """ + Tests that you can submit a correct node delete api request. + """ + cluster = LKECluster(self.client, 18881) + + with self.mock_delete() as m: + cluster.node_delete(1234) + self.assertEqual(m.call_url, "/lke/clusters/18881/nodes/1234") + + def test_node_recycle(self): + """ + Tests that you can submit a correct node recycle api request. + """ + cluster = LKECluster(self.client, 18881) + + with self.mock_post({}) as m: + cluster.node_recycle(1234) + self.assertEqual( + m.call_url, "/lke/clusters/18881/nodes/1234/recycle" + ) + + def test_cluster_nodes_recycle(self): + """ + Tests that you can submit a correct cluster nodes recycle api request. 
+ """ + cluster = LKECluster(self.client, 18881) + + with self.mock_post({}) as m: + cluster.cluster_nodes_recycle() + self.assertEqual(m.call_url, "/lke/clusters/18881/recycle") + + def test_cluster_regenerate(self): + """ + Tests that you can submit a correct cluster regenerate api request. + """ + cluster = LKECluster(self.client, 18881) + + with self.mock_post({}) as m: + cluster.cluster_regenerate() + self.assertEqual(m.call_url, "/lke/clusters/18881/regenerate") + + def test_service_token_delete(self): + """ + Tests that you can submit a correct service token delete api request. + """ + cluster = LKECluster(self.client, 18881) + + with self.mock_delete() as m: + cluster.service_token_delete() + self.assertEqual(m.call_url, "/lke/clusters/18881/servicetoken") + + def test_load_node_pool(self): + """ + Tests that an LKE Node Pool can be retrieved using LinodeClient.load(...) + """ + pool = self.client.load(LKENodePool, 456, 18881) + + self.assertEqual(pool.id, 456) + self.assertEqual(pool.cluster_id, 18881) + self.assertEqual(pool.type.id, "g6-standard-4") + self.assertEqual(pool.label, "example-node-pool") + self.assertIsNotNone(pool.disks) + self.assertIsNotNone(pool.nodes) + self.assertIsNotNone(pool.autoscaler) + self.assertIsNotNone(pool.tags) + + def test_cluster_get_acl(self): + """ + Tests that an LKE cluster can be created with a control plane ACL configuration. 
+ """ + cluster = LKECluster(self.client, 18881) + + with self.mock_get("lke/clusters/18881/control_plane_acl") as m: + _ = cluster.control_plane_acl + + # Get the value again to pull from cache + acl = cluster.control_plane_acl + + assert m.call_url == "/lke/clusters/18881/control_plane_acl" + assert m.method == "get" + + # Ensure the endpoint was only called once + assert m.called == 1 + + assert acl.enabled + assert acl.addresses.ipv4 == ["10.0.0.1/32"] + assert acl.addresses.ipv6 == ["1234::5678"] + + def test_cluster_put_acl(self): + """ + Tests that an LKE cluster can be created with a control plane ACL configuration. + """ + cluster = LKECluster(self.client, 18881) + + with self.mock_put("lke/clusters/18881/control_plane_acl") as m: + acl = cluster.control_plane_acl_update( + LKEClusterControlPlaneACLOptions( + addresses=LKEClusterControlPlaneACLAddressesOptions( + ipv4=["10.0.0.2/32"], + ) + ) + ) + + # Make sure the cache was updated + assert cluster.control_plane_acl.dict == acl.dict + + assert m.call_url == "/lke/clusters/18881/control_plane_acl" + assert m.method == "put" + assert m.call_data == { + "acl": { + "addresses": { + "ipv4": ["10.0.0.2/32"], + } + } + } + + assert acl.enabled + assert acl.addresses.ipv4 == ["10.0.0.1/32"] + + def test_cluster_delete_acl(self): + """ + Tests that an LKE cluster can be created with a control plane ACL configuration. 
+ """ + cluster = LKECluster(self.client, 18881) + + with self.mock_delete() as m: + cluster.control_plane_acl_delete() + + # Make sure the cache was cleared + assert not hasattr(cluster, "_control_plane_acl") + + assert m.call_url == "/lke/clusters/18881/control_plane_acl" + assert m.method == "delete" + + # We expect a GET request to be made when accessing `control_plane_acl` + # because the cached value has been invalidated + with self.mock_get("lke/clusters/18881/control_plane_acl") as m: + cluster.control_plane_acl + + assert m.call_url == "/lke/clusters/18881/control_plane_acl" + assert m.method == "get" + + def test_lke_node_pool_update(self): + """ + Tests that an LKE Node Pool can be properly updated. + """ + pool = LKENodePool(self.client, 456, 18881) + + pool.tags = ["foobar"] + pool.count = 5 + pool.label = "testing-label" + pool.firewall_id = 852 + pool.autoscaler = { + "enabled": True, + "min": 2, + "max": 10, + } + pool.labels = {"updated-key": "updated-value"} + pool.taints = [ + LKENodePoolTaint( + key="updated-key", value="updated-value", effect="NoExecute" + ) + ] + + with self.mock_put("lke/clusters/18881/pools/456") as m: + pool.save() + + assert m.call_data == { + "tags": ["foobar"], + "count": 5, + "autoscaler": { + "enabled": True, + "min": 2, + "max": 10, + }, + "label": "testing-label", + "labels": { + "updated-key": "updated-value", + }, + "firewall_id": 852, + "taints": [ + { + "key": "updated-key", + "value": "updated-value", + "effect": "NoExecute", + } + ], + } + + def test_cluster_create_with_labels_and_taints(self): + """ + Tests that an LKE cluster can be created with labels and taints. 
+ """ + + with self.mock_post("lke/clusters") as m: + self.client.lke.cluster_create( + "us-mia", + "test-acl-cluster", + "1.29", + [ + self.client.lke.node_pool( + "g6-nanode-1", + 3, + labels={ + "foo": "bar", + }, + taints=[ + LKENodePoolTaint( + key="a", value="b", effect="NoSchedule" + ), + {"key": "b", "value": "a", "effect": "NoSchedule"}, + ], + ) + ], + ) + + assert m.call_data["node_pools"][0] == { + "type": "g6-nanode-1", + "count": 3, + "labels": {"foo": "bar"}, + "taints": [ + {"key": "a", "value": "b", "effect": "NoSchedule"}, + {"key": "b", "value": "a", "effect": "NoSchedule"}, + ], + } + + def test_cluster_create_with_apl(self): + """ + Tests that an LKE cluster can be created with APL enabled. + """ + + with self.mock_post("lke/clusters") as m: + cluster = self.client.lke.cluster_create( + "us-mia", + "test-aapl-cluster", + "1.29", + [ + self.client.lke.node_pool( + "g6-dedicated-4", + 3, + ) + ], + apl_enabled=True, + control_plane=LKEClusterControlPlaneOptions( + high_availability=True, + ), + ) + + assert m.call_data["apl_enabled"] == True + assert m.call_data["control_plane"]["high_availability"] == True + + assert ( + cluster.apl_console_url == "https://console.lke18881.akamai-apl.net" + ) + + assert ( + cluster.apl_health_check_url + == "https://auth.lke18881.akamai-apl.net/ready" + ) + + def test_populate_with_taints(self): + """ + Tests that LKENodePool correctly handles a list of LKENodePoolTaint and Dict objects. 
+ """ + self.client = MagicMock() + self.pool = LKENodePool(self.client, 456, 18881) + + self.pool._populate( + { + "taints": [ + LKENodePoolTaint( + key="wow", value="cool", effect="NoExecute" + ), + { + "key": "foo", + "value": "bar", + "effect": "NoSchedule", + }, + ], + } + ) + + assert len(self.pool.taints) == 2 + + assert self.pool.taints[0].dict == { + "key": "wow", + "value": "cool", + "effect": "NoExecute", + } + + assert self.pool.taints[1].dict == { + "key": "foo", + "value": "bar", + "effect": "NoSchedule", + } + + def test_populate_with_node_objects(self): + """ + Tests that LKENodePool correctly handles a list of LKENodePoolNode objects. + """ + self.client = MagicMock() + self.pool = LKENodePool(self.client, 456, 18881) + + node1 = LKENodePoolNode( + self.client, {"id": "node1", "instance_id": 101, "status": "active"} + ) + node2 = LKENodePoolNode( + self.client, + {"id": "node2", "instance_id": 102, "status": "inactive"}, + ) + self.pool._populate({"nodes": [node1, node2]}) + + self.assertEqual(len(self.pool.nodes), 2) + self.assertIsInstance(self.pool.nodes[0], LKENodePoolNode) + self.assertIsInstance(self.pool.nodes[1], LKENodePoolNode) + self.assertEqual(self.pool.nodes[0].id, "node1") + self.assertEqual(self.pool.nodes[1].id, "node2") + + def test_populate_with_node_dicts(self): + """ + Tests that LKENodePool correctly handles a list of node dictionaries. 
+ """ + self.client = MagicMock() + self.pool = LKENodePool(self.client, 456, 18881) + + node_dict1 = {"id": "node3", "instance_id": 103, "status": "pending"} + node_dict2 = {"id": "node4", "instance_id": 104, "status": "failed"} + self.pool._populate({"nodes": [node_dict1, node_dict2]}) + + assert len(self.pool.nodes) == 2 + + assert isinstance(self.pool.nodes[0], LKENodePoolNode) + assert isinstance(self.pool.nodes[1], LKENodePoolNode) + + assert self.pool.nodes[0].id == "node3" + assert self.pool.nodes[1].id == "node4" + + def test_populate_with_node_ids(self): + """ + Tests that LKENodePool correctly handles a list of node IDs. + """ + self.client = MagicMock() + self.pool = LKENodePool(self.client, 456, 18881) + + node_id1 = "node5" + node_id2 = "node6" + + # Mock instances creation + self.client.load = MagicMock( + side_effect=[ + LKENodePoolNode( + self.client, + {"id": "node5", "instance_id": 105, "status": "active"}, + ), + LKENodePoolNode( + self.client, + {"id": "node6", "instance_id": 106, "status": "inactive"}, + ), + ] + ) + + self.pool._populate({"nodes": [node_id1, node_id2]}) + + assert len(self.pool.nodes) == 2 + + assert isinstance(self.pool.nodes[0], LKENodePoolNode) + assert isinstance(self.pool.nodes[1], LKENodePoolNode) + + assert self.pool.nodes[0].id == "node5" + assert self.pool.nodes[1].id == "node6" + + def test_populate_with_mixed_types(self): + """ + Tests that LKENodePool correctly handles a mixed list of node objects, dicts, and IDs. 
+ """ + self.client = MagicMock() + self.pool = LKENodePool(self.client, 456, 18881) + + node1 = LKENodePoolNode( + self.client, {"id": "node7", "instance_id": 107, "status": "active"} + ) + node_dict = {"id": "node8", "instance_id": 108, "status": "inactive"} + node_id = "node9" + # Mock instances creation + self.client.load = MagicMock( + side_effect=[ + LKENodePoolNode( + self.client, + {"id": "node9", "instance_id": 109, "status": "pending"}, + ) + ] + ) + self.pool._populate({"nodes": [node1, node_dict, node_id]}) + + assert len(self.pool.nodes) == 3 + assert isinstance(self.pool.nodes[0], LKENodePoolNode) + assert isinstance(self.pool.nodes[1], LKENodePoolNode) + assert isinstance(self.pool.nodes[2], LKENodePoolNode) + assert self.pool.nodes[0].id == "node7" + assert self.pool.nodes[1].id == "node8" + assert self.pool.nodes[2].id == "node9" + + def test_cluster_create_acl_null_addresses(self): + with self.mock_post("lke/clusters") as m: + self.client.lke.cluster_create( + region="us-mia", + label="foobar", + kube_version="1.32", + node_pools=[self.client.lke.node_pool("g6-standard-1", 3)], + control_plane={ + "acl": { + "enabled": False, + "addresses": None, + } + }, + ) + + # Addresses should not be included in the API request if it's null + # See: TPT-3489 + assert m.call_data["control_plane"] == { + "acl": { + "enabled": False, + } + } + + def test_cluster_update_acl_null_addresses(self): + cluster = LKECluster(self.client, 18881) + + with self.mock_put("lke/clusters/18881/control_plane_acl") as m: + cluster.control_plane_acl_update( + { + "enabled": True, + "addresses": None, + } + ) + + # Addresses should not be included in the API request if it's null + # See: TPT-3489 + assert m.call_data == {"acl": {"enabled": True}} + + def test_cluster_enterprise(self): + cluster = LKECluster(self.client, 18882) + + assert cluster.tier == "enterprise" + assert cluster.k8s_version.id == "1.31.1+lke1" + + pool = LKENodePool(self.client, 789, 18882) + assert 
pool.k8s_version == "1.31.1+lke1" + assert pool.update_strategy == "rolling_update" + assert pool.label == "enterprise-node-pool" + assert pool.firewall_id == 789 + + def test_lke_tiered_version(self): + version = TieredKubeVersion(self.client, "1.32", "standard") + + assert version.id == "1.32" + + # Ensure the version is properly refreshed + version.invalidate() + + assert version.id == "1.32" diff --git a/test/unit/objects/lock_test.py b/test/unit/objects/lock_test.py new file mode 100644 index 000000000..ce630d0b6 --- /dev/null +++ b/test/unit/objects/lock_test.py @@ -0,0 +1,34 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects.lock import Lock, LockEntity + + +class LockTest(ClientBaseCase): + """ + Tests methods of the Lock class + """ + + def test_get_lock(self): + """ + Tests that a lock is loaded correctly by ID + """ + lock = Lock(self.client, 1) + + self.assertEqual(lock.id, 1) + self.assertEqual(lock.lock_type, "cannot_delete") + self.assertIsInstance(lock.entity, LockEntity) + self.assertEqual(lock.entity.id, 123) + self.assertEqual(lock.entity.type, "linode") + self.assertEqual(lock.entity.label, "test-linode") + self.assertEqual(lock.entity.url, "/v4/linode/instances/123") + + def test_delete_lock(self): + """ + Tests that a lock can be deleted using the Lock object's delete method + """ + lock = Lock(self.client, 1) + + with self.mock_delete() as m: + lock.delete() + + self.assertEqual(m.call_url, "/locks/1") diff --git a/test/unit/objects/longview_test.py b/test/unit/objects/longview_test.py new file mode 100644 index 000000000..10f3388eb --- /dev/null +++ b/test/unit/objects/longview_test.py @@ -0,0 +1,99 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4.objects import ( + LongviewClient, + LongviewPlan, + LongviewSubscription, +) +from linode_api4.objects.base import MappedObject + + +class LongviewPlanTest(ClientBaseCase): + """ + Tests methods of the LongviewPlan class + """ 
+ + def test_get_plan(self): + """ + Tests that a plan is loaded correctly + """ + plan = LongviewPlan(self.client, "longview-10") + + self.assertEqual(plan.id, "longview-10") + self.assertEqual(plan.clients_included, 10) + self.assertEqual(plan.label, "Longview Pro 10 pack") + self.assertIsNotNone(plan.price) + + +class LongviewClientTest(ClientBaseCase): + """ + Tests methods of the LongviewClient class + """ + + def test_get_client(self): + """ + Tests that a client is loaded correctly by ID + """ + client = LongviewClient(self.client, 1234) + self.assertEqual(client._populated, False) + + self.assertEqual(client.label, "test_client_1") + self.assertEqual(client._populated, True) + + self.assertIsInstance(client.created, datetime) + self.assertIsInstance(client.updated, datetime) + + self.assertIsInstance(client.apps, MappedObject) + self.assertFalse(client.apps.nginx) + self.assertFalse(client.apps.mysql) + self.assertFalse(client.apps.apache) + + self.assertEqual( + client.install_code, "12345678-ABCD-EF01-23456789ABCDEF12" + ) + self.assertEqual(client.api_key, "12345678-ABCD-EF01-23456789ABCDEF12") + + def test_update_label(self): + """ + Tests that updating a client's label contacts the api correctly. + """ + with self.mock_put("longview/clients/1234") as m: + client = LongviewClient(self.client, 1234) + client.label = "updated" + client.save() + + self.assertEqual(m.call_url, "/longview/clients/1234") + self.assertEqual(m.call_data, {"label": "updated"}) + + def test_delete_client(self): + """ + Tests that deleting a client creates the correct api request. 
+ """ + with self.mock_delete() as m: + client = LongviewClient(self.client, 1234) + client.delete() + + self.assertEqual(m.call_url, "/longview/clients/1234") + + +class LongviewSubscriptionTest(ClientBaseCase): + """ + Tests methods of the LongviewSubscription class + """ + + def test_get_subscription(self): + """ + Tests that a subscription is loaded correctly by ID + """ + sub = LongviewSubscription(self.client, "longview-40") + self.assertEqual(sub._populated, False) + + self.assertEqual(sub.label, "Longview Pro 40 pack") + self.assertEqual(sub._populated, True) + + self.assertEqual(sub.clients_included, 40) + + self.assertIsInstance(sub.price, MappedObject) + self.assertEqual(sub.price.hourly, 0.15) + self.assertEqual(sub.price.monthly, 100) diff --git a/test/unit/objects/mapped_object_test.py b/test/unit/objects/mapped_object_test.py new file mode 100644 index 000000000..ac2448a4a --- /dev/null +++ b/test/unit/objects/mapped_object_test.py @@ -0,0 +1,64 @@ +from dataclasses import dataclass +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Base, JSONObject, MappedObject, Property + + +class MappedObjectCase(ClientBaseCase): + def test_mapped_object_dict(self): + test_dict = { + "key1": 1, + "key2": "2", + "key3": 3.3, + "key4": [41, "42", {"key4-3": "43"}], + "key5": { + "key5-1": 1, + "key5-2": {"key5-2-1": {"key5-2-1-1": 1}}, + "key5-3": [{"key5-3-1": 531}, {"key5-3-2": 532}], + }, + } + + mapped_obj = MappedObject(**test_dict) + self.assertEqual(mapped_obj.dict, test_dict) + + def test_serialize_base_objects(self): + test_property_name = "bar" + test_property_value = "bar" + + class Foo(Base): + api_endpoint = "/testmappedobj1" + id_attribute = test_property_name + properties = { + test_property_name: Property(mutable=True), + } + + foo = Foo(self.client, test_property_value) + foo._api_get() + + expected_dict = { + "foo": { + test_property_name: test_property_value, + } + } + + mapped_obj = MappedObject(foo=foo) + 
self.assertEqual(mapped_obj.dict, expected_dict) + + def test_serialize_json_objects(self): + test_property_name = "bar" + test_property_value = "bar" + + @dataclass + class Foo(JSONObject): + bar: str = "" + + foo = Foo.from_json({test_property_name: test_property_value}) + + expected_dict = { + "foo": { + test_property_name: test_property_value, + } + } + + mapped_obj = MappedObject(foo=foo) + self.assertEqual(mapped_obj.dict, expected_dict) diff --git a/test/unit/objects/monitor_test.py b/test/unit/objects/monitor_test.py new file mode 100644 index 000000000..329a09063 --- /dev/null +++ b/test/unit/objects/monitor_test.py @@ -0,0 +1,148 @@ +import datetime +from test.unit.base import ClientBaseCase + +from linode_api4.objects import MonitorDashboard, MonitorService + + +class MonitorTest(ClientBaseCase): + """ + Tests the methods of MonitorServiceSupported class + """ + + def test_supported_services(self): + """ + Test the services supported by monitor + """ + service = self.client.monitor.services() + self.assertEqual(len(service), 1) + self.assertEqual(service[0].label, "Databases") + self.assertEqual(service[0].service_type, "dbaas") + + def test_dashboard_by_ID(self): + """ + Test the dashboard by ID API + """ + dashboard = self.client.load(MonitorDashboard, 1) + self.assertEqual(dashboard.type, "standard") + self.assertEqual( + dashboard.created, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboard.id, 1) + self.assertEqual(dashboard.label, "Resource Usage") + self.assertEqual(dashboard.service_type, "dbaas") + self.assertEqual( + dashboard.updated, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboard.widgets[0].aggregate_function, "sum") + self.assertEqual(dashboard.widgets[0].chart_type, "area") + self.assertEqual(dashboard.widgets[0].color, "default") + self.assertEqual(dashboard.widgets[0].label, "CPU Usage") + self.assertEqual(dashboard.widgets[0].metric, "cpu_usage") + 
self.assertEqual(dashboard.widgets[0].size, 12) + self.assertEqual(dashboard.widgets[0].unit, "%") + self.assertEqual(dashboard.widgets[0].y_label, "cpu_usage") + self.assertEqual(dashboard.widgets[0].group_by, ["entity_id"]) + self.assertIsNone(dashboard.widgets[0].filters) + + def test_dashboard_by_service_type(self): + dashboards = self.client.monitor.dashboards(service_type="dbaas") + self.assertEqual(dashboards[0].type, "standard") + self.assertEqual( + dashboards[0].created, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].id, 1) + self.assertEqual(dashboards[0].label, "Resource Usage") + self.assertEqual(dashboards[0].service_type, "dbaas") + self.assertEqual( + dashboards[0].updated, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].widgets[0].aggregate_function, "sum") + self.assertEqual(dashboards[0].widgets[0].chart_type, "area") + self.assertEqual(dashboards[0].widgets[0].color, "default") + self.assertEqual(dashboards[0].widgets[0].label, "CPU Usage") + self.assertEqual(dashboards[0].widgets[0].metric, "cpu_usage") + self.assertEqual(dashboards[0].widgets[0].size, 12) + self.assertEqual(dashboards[0].widgets[0].unit, "%") + self.assertEqual(dashboards[0].widgets[0].y_label, "cpu_usage") + self.assertEqual(dashboards[0].widgets[0].group_by, ["entity_id"]) + self.assertIsNone(dashboards[0].widgets[0].filters) + + # Test the second widget which has filters + self.assertEqual(dashboards[0].widgets[1].label, "Memory Usage") + self.assertEqual(dashboards[0].widgets[1].group_by, ["entity_id"]) + self.assertIsNotNone(dashboards[0].widgets[1].filters) + self.assertEqual(len(dashboards[0].widgets[1].filters), 1) + self.assertEqual( + dashboards[0].widgets[1].filters[0].dimension_label, "pattern" + ) + self.assertEqual(dashboards[0].widgets[1].filters[0].operator, "in") + self.assertEqual( + dashboards[0].widgets[1].filters[0].value, "publicout,privateout" + ) + + def test_get_all_dashboards(self): + 
dashboards = self.client.monitor.dashboards() + self.assertEqual(dashboards[0].type, "standard") + self.assertEqual( + dashboards[0].created, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].id, 1) + self.assertEqual(dashboards[0].label, "Resource Usage") + self.assertEqual(dashboards[0].service_type, "dbaas") + self.assertEqual( + dashboards[0].updated, datetime.datetime(2024, 10, 10, 5, 1, 58) + ) + self.assertEqual(dashboards[0].widgets[0].aggregate_function, "sum") + self.assertEqual(dashboards[0].widgets[0].chart_type, "area") + self.assertEqual(dashboards[0].widgets[0].color, "default") + self.assertEqual(dashboards[0].widgets[0].label, "CPU Usage") + self.assertEqual(dashboards[0].widgets[0].metric, "cpu_usage") + self.assertEqual(dashboards[0].widgets[0].size, 12) + self.assertEqual(dashboards[0].widgets[0].unit, "%") + self.assertEqual(dashboards[0].widgets[0].y_label, "cpu_usage") + self.assertEqual(dashboards[0].widgets[0].group_by, ["entity_id"]) + self.assertIsNone(dashboards[0].widgets[0].filters) + + def test_specific_service_details(self): + data = self.client.load(MonitorService, "dbaas") + self.assertEqual(data.label, "Databases") + self.assertEqual(data.service_type, "dbaas") + + # Test alert configuration + self.assertIsNotNone(data.alert) + self.assertEqual(data.alert.polling_interval_seconds, [300]) + self.assertEqual(data.alert.evaluation_period_seconds, [300]) + self.assertEqual(data.alert.scope, ["entity"]) + + def test_metric_definitions(self): + + metrics = self.client.monitor.metric_definitions(service_type="dbaas") + self.assertEqual( + metrics[0].available_aggregate_functions, + ["max", "avg", "min", "sum"], + ) + self.assertTrue(metrics[0].is_alertable) + self.assertEqual(metrics[0].label, "CPU Usage") + self.assertEqual(metrics[0].metric, "cpu_usage") + self.assertEqual(metrics[0].metric_type, "gauge") + self.assertEqual(metrics[0].scrape_interval, "60s") + self.assertEqual(metrics[0].unit, "percent") + 
self.assertEqual(metrics[0].dimensions[0].dimension_label, "node_type") + self.assertEqual(metrics[0].dimensions[0].label, "Node Type") + self.assertEqual( + metrics[0].dimensions[0].values, ["primary", "secondary"] + ) + + def test_create_token(self): + + with self.mock_post("/monitor/services/dbaas/token") as m: + self.client.monitor.create_token( + service_type="dbaas", entity_ids=[189690, 188020] + ) + self.assertEqual(m.return_dct["token"], "abcdefhjigkfghh") + + with self.mock_post("/monitor/services/linode/token") as m: + self.client.monitor.create_token( + service_type="linode", entity_ids=["compute-instance-1"] + ) + self.assertEqual(m.return_dct["token"], "abcdefhjigkfghh") diff --git a/test/unit/objects/networking_test.py b/test/unit/objects/networking_test.py new file mode 100644 index 000000000..cd2e1b15e --- /dev/null +++ b/test/unit/objects/networking_test.py @@ -0,0 +1,173 @@ +from test.unit.base import ClientBaseCase + +from linode_api4 import VLAN, ExplicitNullValue, Instance, Region +from linode_api4.objects import Firewall, IPAddress, IPv6Range + + +class NetworkingTest(ClientBaseCase): + """ + Tests methods of the Networking class + """ + + def test_get_ipv6_range(self): + """ + Tests that the IPv6Range object is properly generated. + """ + + ipv6Range = IPv6Range(self.client, "2600:3c01::") + ipv6Range._api_get() + + self.assertEqual(ipv6Range.range, "2600:3c01::") + self.assertEqual(ipv6Range.prefix, 64) + self.assertEqual(ipv6Range.region.id, "us-east") + self.assertEqual(ipv6Range.linodes[0], 123) + self.assertEqual(ipv6Range.is_bgp, False) + + ranges = self.client.networking.ipv6_ranges() + + self.assertEqual(ranges[0].range, "2600:3c01::") + self.assertEqual(ranges[0].prefix, 64) + self.assertEqual(ranges[0].region.id, "us-east") + self.assertEqual( + ranges[0].route_target, "2600:3c01::ffff:ffff:ffff:ffff" + ) + + def test_get_rules(self): + """ + Tests that you can submit a correct firewall rules view api request. 
+ """ + + firewall = Firewall(self.client, 123) + + with self.mock_get("/networking/firewalls/123/rules") as m: + result = firewall.get_rules() + self.assertEqual(m.call_url, "/networking/firewalls/123/rules") + self.assertEqual(result["inbound"], []) + self.assertEqual(result["outbound"], []) + self.assertEqual(result["inbound_policy"], "DROP") + self.assertEqual(result["outbound_policy"], "DROP") + + def test_get_rule_versions(self): + """ + Tests that you can submit a correct firewall rule versions view api request. + """ + + firewall = Firewall(self.client, 123) + + with self.mock_get("/networking/firewalls/123/history") as m: + result = firewall.rule_versions + self.assertEqual(m.call_url, "/networking/firewalls/123/history") + self.assertEqual(result["data"][0]["status"], "enabled") + self.assertEqual(result["data"][0]["rules"]["version"], 1) + self.assertEqual(result["data"][0]["status"], "enabled") + self.assertEqual(result["data"][1]["rules"]["version"], 2) + + def test_get_rule_version(self): + """ + Tests that you can submit a correct firewall rule version view api request. 
+ """ + + firewall = Firewall(self.client, 123) + + with self.mock_get("/networking/firewalls/123/history/rules/2") as m: + result = firewall.get_rule_version(2) + self.assertEqual( + m.call_url, "/networking/firewalls/123/history/rules/2" + ) + self.assertEqual(result["inbound"][0]["action"], "ACCEPT") + self.assertEqual( + result["inbound"][0]["addresses"]["ipv4"][0], "0.0.0.0/0" + ) + self.assertEqual( + result["inbound"][0]["addresses"]["ipv6"][0], "ff00::/8" + ) + self.assertEqual( + result["inbound"][0]["description"], + "A really cool firewall rule.", + ) + self.assertEqual( + result["inbound"][0]["label"], "really-cool-firewall-rule" + ) + self.assertEqual(result["inbound"][0]["ports"], "80") + self.assertEqual(result["inbound"][0]["protocol"], "TCP") + self.assertEqual(result["outbound"], []) + self.assertEqual(result["inbound_policy"], "ACCEPT") + self.assertEqual(result["outbound_policy"], "DROP") + self.assertEqual(result["version"], 2) + + def test_rdns_reset(self): + """ + Tests that the RDNS of an IP and be reset using an explicit null value. + """ + + ip = IPAddress(self.client, "127.0.0.1") + + with self.mock_put("/networking/ips/127.0.0.1") as m: + ip.rdns = ExplicitNullValue() + ip.save() + + self.assertEqual(m.call_url, "/networking/ips/127.0.0.1") + + # We need to assert of call_data_raw because + # call_data drops keys with null values + self.assertEqual(m.call_data_raw, '{"rdns": null}') + + # Ensure that everything works as expected with a class reference + with self.mock_put("/networking/ips/127.0.0.1") as m: + ip.rdns = ExplicitNullValue + ip.save() + + self.assertEqual(m.call_url, "/networking/ips/127.0.0.1") + + self.assertEqual(m.call_data_raw, '{"rdns": null}') + + def test_get_ip(self): + """ + Tests retrieving comprehensive IP address information, including all relevant properties. 
+ """ + + ip = IPAddress(self.client, "127.0.0.1") + + def __validate_ip(_ip: IPAddress): + assert _ip.address == "127.0.0.1" + assert _ip.gateway == "127.0.0.1" + assert _ip.linode_id == 123 + assert _ip.interface_id == 456 + assert _ip.prefix == 24 + assert _ip.public + assert _ip.rdns == "test.example.org" + assert _ip.region.id == "us-east" + assert _ip.subnet_mask == "255.255.255.0" + assert _ip.type == "ipv4" + assert _ip.vpc_nat_1_1.vpc_id == 242 + assert _ip.vpc_nat_1_1.subnet_id == 194 + assert _ip.vpc_nat_1_1.address == "139.144.244.36" + + __validate_ip(ip) + ip.invalidate() + __validate_ip(ip) + + def test_delete_ip(self): + """ + Tests that deleting an IP creates the correct api request + """ + with self.mock_delete() as m: + ip = IPAddress(self.client, "127.0.0.1") + ip.to(Instance(self.client, 123)) + ip.delete() + + self.assertEqual(m.call_url, "/linode/instances/123/ips/127.0.0.1") + + def test_delete_vlan(self): + """ + Tests that deleting a VLAN creates the correct api request + """ + with self.mock_delete() as m: + self.client.networking.delete_vlan( + VLAN(self.client, "vlan-test"), + Region(self.client, "us-southeast"), + ) + + self.assertEqual( + m.call_url, "/networking/vlans/us-southeast/vlan-test" + ) diff --git a/test/unit/objects/nodebalancers_test.py b/test/unit/objects/nodebalancers_test.py new file mode 100644 index 000000000..c02b40ea3 --- /dev/null +++ b/test/unit/objects/nodebalancers_test.py @@ -0,0 +1,268 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects import ( + NodeBalancer, + NodeBalancerConfig, + NodeBalancerNode, +) + + +class NodeBalancerConfigTest(ClientBaseCase): + """ + Tests methods of the NodeBalancerConfig class + """ + + def test_get_config(self): + """ + Tests that a config is loaded correctly by ID + """ + config = NodeBalancerConfig(self.client, 65432, 123456) + self.assertEqual(config._populated, False) + + self.assertEqual(config.port, 80) + self.assertEqual(config._populated, True) + + 
self.assertEqual(config.check, "connection") + self.assertEqual(config.protocol, "http") + self.assertEqual(config.check_attempts, 2) + self.assertEqual(config.stickiness, "table") + self.assertEqual(config.check_interval, 5) + self.assertEqual(config.check_body, "") + self.assertEqual(config.check_passive, True) + self.assertEqual(config.algorithm, "roundrobin") + self.assertEqual(config.check_timeout, 3) + self.assertEqual(config.check_path, "/") + self.assertEqual(config.ssl_cert, None) + self.assertEqual(config.ssl_commonname, "") + self.assertEqual(config.nodebalancer_id, 123456) + self.assertEqual(config.cipher_suite, "recommended") + self.assertEqual(config.ssl_key, None) + self.assertEqual(config.nodes_status.up, 0) + self.assertEqual(config.nodes_status.down, 0) + self.assertEqual(config.ssl_fingerprint, "") + self.assertEqual(config.proxy_protocol, "none") + + config_udp = NodeBalancerConfig(self.client, 65431, 123456) + self.assertEqual(config_udp.protocol, "udp") + self.assertEqual(config_udp.udp_check_port, 12345) + + def test_update_config_udp(self): + """ + Tests that a config with a protocol of udp can be updated and that cipher suite is properly excluded in save() + """ + with self.mock_put("nodebalancers/123456/configs/65431") as m: + config = self.client.load(NodeBalancerConfig, 65431, 123456) + config.udp_check_port = 54321 + config.save() + + self.assertEqual(m.call_url, "/nodebalancers/123456/configs/65431") + self.assertEqual(m.call_data["udp_check_port"], 54321) + self.assertNotIn("cipher_suite", m.call_data) + + +class NodeBalancerNodeTest(ClientBaseCase): + """ + Tests methods of the NodeBalancerNode class + """ + + def test_get_node(self): + """ + Tests that a node is loaded correctly by ID + """ + node = NodeBalancerNode(self.client, 54321, (65432, 123456)) + self.assertEqual(node._populated, False) + + self.assertEqual(node.weight, 50) + self.assertEqual(node._populated, True) + + self.assertEqual(node.id, 54321) + 
self.assertEqual(node.address, "192.168.210.120") + self.assertEqual(node.label, "node54321") + self.assertEqual(node.status, "UP") + self.assertEqual(node.mode, "accept") + self.assertEqual(node.config_id, 65432) + self.assertEqual(node.nodebalancer_id, 123456) + + node_udp = NodeBalancerNode(self.client, 12345, (65432, 123456)) + self.assertEqual(node_udp.mode, "none") + + def test_create_node(self): + """ + Tests that a node can be created + """ + with self.mock_post( + "nodebalancers/123456/configs/65432/nodes/54321" + ) as m: + config = NodeBalancerConfig(self.client, 65432, 123456) + node = config.node_create( + "node54321", "192.168.210.120", weight=50, mode="accept" + ) + + self.assertIsNotNone(node) + self.assertEqual(node.id, 54321) + self.assertEqual( + m.call_url, "/nodebalancers/123456/configs/65432/nodes" + ) + self.assertEqual( + m.call_data, + { + "label": "node54321", + "address": "192.168.210.120", + "weight": 50, + "mode": "accept", + }, + ) + + def test_update_node(self): + """ + Tests that a node can be updated + """ + with self.mock_put( + "nodebalancers/123456/configs/65432/nodes/54321" + ) as m: + node = self.client.load(NodeBalancerNode, 54321, (65432, 123456)) + node.label = "ThisNewLabel" + node.weight = 60 + node.mode = "drain" + node.address = "192.168.210.121" + node.save() + + self.assertEqual( + m.call_url, "/nodebalancers/123456/configs/65432/nodes/54321" + ) + self.assertEqual( + m.call_data, + { + "label": "ThisNewLabel", + "address": "192.168.210.121", + "mode": "drain", + "weight": 60, + }, + ) + + def test_delete_node(self): + """ + Tests that deleting a node creates the correct api request. + """ + with self.mock_delete() as m: + node = NodeBalancerNode(self.client, 54321, (65432, 123456)) + node.delete() + + self.assertEqual( + m.call_url, "/nodebalancers/123456/configs/65432/nodes/54321" + ) + + +class NodeBalancerTest(ClientBaseCase): + def test_update(self): + """ + Test that you can update a NodeBalancer. 
+ """ + nb = NodeBalancer(self.client, 123456) + nb.label = "updated-label" + nb.client_conn_throttle = 7 + nb.tags = ["foo", "bar"] + + with self.mock_put("nodebalancers/123456") as m: + nb.save() + self.assertEqual(m.call_url, "/nodebalancers/123456") + self.assertEqual( + m.call_data, + { + "label": "updated-label", + "client_conn_throttle": 7, + "tags": ["foo", "bar"], + }, + ) + + def test_locks_not_in_put(self): + """ + Test that locks are not included in PUT request when updating a NodeBalancer. + Locks are managed through the separate /v4/locks endpoint. + """ + nb = NodeBalancer(self.client, 123456) + # Access locks to ensure it's loaded + self.assertEqual(nb.locks, ["cannot_delete_with_subresources"]) + + nb.label = "new-label" + + with self.mock_put("nodebalancers/123456") as m: + nb.save() + self.assertEqual(m.call_url, "/nodebalancers/123456") + # Verify locks is NOT in the PUT data + self.assertNotIn("locks", m.call_data) + self.assertEqual(m.call_data["label"], "new-label") + + def test_firewalls(self): + """ + Test that you can get the firewalls for the requested NodeBalancer. + """ + nb = NodeBalancer(self.client, 12345) + firewalls_url = "/nodebalancers/12345/firewalls" + + with self.mock_get(firewalls_url) as m: + result = nb.firewalls() + self.assertEqual(m.call_url, firewalls_url) + self.assertEqual(len(result), 1) + + def test_config_rebuild(self): + """ + Test that you can rebuild the cofig of a node balancer. 
+ """ + config_rebuild_url = "/nodebalancers/12345/configs/4567/rebuild" + with self.mock_post(config_rebuild_url) as m: + nb = NodeBalancer(self.client, 12345) + nodes = [ + { + "id": 54321, + "address": "192.168.210.120:80", + "label": "node1", + "weight": 50, + "mode": "accept", + } + ] + + result = nb.config_rebuild( + 4567, + nodes, + port=1234, + protocol="https", + algorithm="roundrobin", + ) + self.assertIsNotNone(result) + self.assertEqual(result.id, 4567) + self.assertEqual(result.nodebalancer_id, 12345) + self.assertEqual(m.call_url, config_rebuild_url) + self.assertEqual( + m.call_data, + { + "port": 1234, + "protocol": "https", + "algorithm": "roundrobin", + "nodes": [ + { + "id": 54321, + "address": "192.168.210.120:80", + "label": "node1", + "weight": 50, + "mode": "accept", + }, + ], + }, + ) + + def test_statistics(self): + """ + Test that you can get the statistics about the requested NodeBalancer. + """ + statistics_url = "/nodebalancers/12345/stats" + with self.mock_get(statistics_url) as m: + nb = NodeBalancer(self.client, 12345) + result = nb.statistics() + + self.assertIsNotNone(result) + self.assertEqual( + result.title, + "linode.com - balancer12345 (12345) - day (5 min avg)", + ) + self.assertEqual(m.call_url, statistics_url) diff --git a/test/unit/objects/object_storage_test.py b/test/unit/objects/object_storage_test.py new file mode 100644 index 000000000..b7ff7e49c --- /dev/null +++ b/test/unit/objects/object_storage_test.py @@ -0,0 +1,337 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4 import ObjectStorageEndpointType +from linode_api4.objects import ( + ObjectStorageACL, + ObjectStorageBucket, + ObjectStorageCluster, + ObjectStorageQuota, +) + + +class ObjectStorageTest(ClientBaseCase): + """ + Test the methods of the ObjectStorage + """ + + def test_object_storage_bucket_api_get(self): + object_storage_bucket_api_get_url = ( + "/object-storage/buckets/us-east-1/example-bucket" + ) + 
with self.mock_get(object_storage_bucket_api_get_url) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + self.assertEqual(object_storage_bucket.cluster, "us-east-1") + self.assertEqual(object_storage_bucket.label, "example-bucket") + self.assertEqual( + object_storage_bucket.created, + datetime( + year=2019, month=1, day=1, hour=1, minute=23, second=45 + ), + ) + self.assertEqual( + object_storage_bucket.hostname, + "example-bucket.us-east-1.linodeobjects.com", + ) + self.assertEqual(object_storage_bucket.objects, 4) + self.assertEqual(object_storage_bucket.size, 188318981) + self.assertEqual( + object_storage_bucket.endpoint_type, + ObjectStorageEndpointType.E1, + ) + self.assertEqual( + object_storage_bucket.s3_endpoint, + "us-east-12.linodeobjects.com", + ) + self.assertEqual(m.call_url, object_storage_bucket_api_get_url) + + def test_object_storage_bucket_delete(self): + object_storage_bucket_delete_url = ( + "/object-storage/buckets/us-east-1/example-bucket" + ) + with self.mock_delete() as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + object_storage_bucket.delete() + self.assertEqual(m.call_url, object_storage_bucket_delete_url) + + def test_bucket_access_get(self): + bucket_access_get_url = ( + "/object-storage/buckets/us-east/example-bucket/access" + ) + with self.mock_get(bucket_access_get_url) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east" + ) + result = object_storage_bucket.access_get() + self.assertIsNotNone(result) + self.assertEqual(m.call_url, bucket_access_get_url) + self.assertEqual(result.acl, "authenticated-read") + self.assertEqual(result.cors_enabled, True) + self.assertEqual(result.acl_xml, "...") + + def test_bucket_access_modify(self): + """ + Test that you can modify bucket access settings. 
+ """ + bucket_access_modify_url = ( + "/object-storage/buckets/us-east/example-bucket/access" + ) + with self.mock_post({}) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east" + ) + object_storage_bucket.access_modify(ObjectStorageACL.PRIVATE, True) + self.assertEqual( + m.call_data, + { + "acl": "private", + "cors_enabled": True, + }, + ) + self.assertEqual(m.call_url, bucket_access_modify_url) + + def test_bucket_access_update(self): + """ + Test that you can update bucket access settings. + """ + bucket_access_update_url = ( + "/object-storage/buckets/us-east-1/example-bucket/access" + ) + with self.mock_put({}) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + object_storage_bucket.access_update(ObjectStorageACL.PRIVATE, True) + self.assertEqual( + m.call_data, + { + "acl": "private", + "cors_enabled": True, + }, + ) + self.assertEqual(m.call_url, bucket_access_update_url) + + def test_buckets_in_cluster(self): + """ + Test that Object Storage Buckets in a specified cluster can be reterived + """ + buckets_in_cluster_url = "/object-storage/buckets/us-east-1" + with self.mock_get(buckets_in_cluster_url) as m: + cluster = ObjectStorageCluster(self.client, "us-east-1") + buckets = cluster.buckets_in_cluster() + self.assertIsNotNone(buckets) + bucket = buckets[0] + + self.assertEqual(m.call_url, buckets_in_cluster_url) + self.assertEqual(bucket.cluster, "us-east-1") + self.assertEqual( + bucket.created, + datetime( + year=2019, month=1, day=1, hour=1, minute=23, second=45 + ), + ) + self.assertEqual( + bucket.hostname, "example-bucket.us-east-1.linodeobjects.com" + ) + self.assertEqual(bucket.label, "example-bucket") + self.assertEqual(bucket.objects, 4) + self.assertEqual(bucket.size, 188318981) + self.assertEqual(bucket.endpoint_type, ObjectStorageEndpointType.E1) + self.assertEqual(bucket.s3_endpoint, "us-east-12.linodeobjects.com") + + def 
test_ssl_cert_delete(self): + """ + Test that you can delete the TLS/SSL certificate and private key of a bucket. + """ + ssl_cert_delete_url = ( + "/object-storage/buckets/us-east-1/example-bucket/ssl" + ) + with self.mock_delete() as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + object_storage_bucket.ssl_cert_delete() + self.assertEqual(m.call_url, ssl_cert_delete_url) + + def test_ssl_cert(self): + """ + Test tha you can get a bool value indicating if this bucket + has a corresponding TLS/SSL certificate. + """ + ssl_cert_url = "/object-storage/buckets/us-east-1/example-bucket/ssl" + with self.mock_get(ssl_cert_url) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + result = object_storage_bucket.ssl_cert() + self.assertIsNotNone(result) + self.assertEqual(m.call_url, ssl_cert_url) + self.assertEqual(result.ssl, True) + + def test_ssl_cert_upload(self): + """ + Test that you can upload a TLS/SSL cert. + """ + ssl_cert_upload_url = ( + "/object-storage/buckets/us-east-1/example-bucket/ssl" + ) + with self.mock_post(ssl_cert_upload_url) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + result = object_storage_bucket.ssl_cert_upload( + "-----BEGIN CERTIFICATE-----\nCERTIFICATE_INFORMATION\n-----END CERTIFICATE-----", + "-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY_INFORMATION\n-----END PRIVATE KEY-----", + ) + self.assertIsNotNone(result) + self.assertEqual(m.call_url, ssl_cert_upload_url) + self.assertEqual(result.ssl, True) + self.assertEqual( + m.call_data, + { + "certificate": "-----BEGIN CERTIFICATE-----\nCERTIFICATE_INFORMATION\n-----END CERTIFICATE-----", + "private_key": "-----BEGIN PRIVATE KEY-----\nPRIVATE_KEY_INFORMATION\n-----END PRIVATE KEY-----", + }, + ) + + def test_contents(self): + """ + Test that you can get the contents of a bucket. 
+ """ + bucket_contents_url = ( + "/object-storage/buckets/us-east-1/example-bucket/object-list" + ) + with self.mock_get(bucket_contents_url) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + contents = object_storage_bucket.contents() + self.assertIsNotNone(contents) + content = contents[0] + + self.assertEqual(m.call_url, bucket_contents_url) + self.assertEqual(content.etag, "9f254c71e28e033bf9e0e5262e3e72ab") + self.assertEqual(content.is_truncated, True) + self.assertEqual(content.last_modified, "2019-01-01T01:23:45") + self.assertEqual(content.name, "example") + self.assertEqual( + content.next_marker, + "bd021c21-e734-4823-97a4-58b41c2cd4c8.892602.184", + ) + self.assertEqual( + content.owner, "bfc70ab2-e3d4-42a4-ad55-83921822270c" + ) + self.assertEqual(content.size, 123) + self.assertEqual( + m.call_data, + { + "page_size": 100, + }, + ) + + def test_object_acl_config(self): + """ + Test that you can view an Objectโ€™s configured Access Control List (ACL) in this Object Storage bucket. + """ + object_acl_config_url = ( + "/object-storage/buckets/us-east-1/example-bucket/object-acl" + ) + with self.mock_get(object_acl_config_url) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + acl = object_storage_bucket.object_acl_config("example") + self.assertEqual(m.call_url, object_acl_config_url) + self.assertEqual(acl.acl, "public-read") + self.assertEqual( + acl.acl_xml, "..." + ) + self.assertEqual( + m.call_data, + { + "name": "example", + }, + ) + + def test_object_acl_config_update(self): + """ + Test that you can update an Objectโ€™s configured Access Control List (ACL) in this Object Storage bucket. 
+ """ + object_acl_config_update_url = ( + "/object-storage/buckets/us-east-1/example-bucket/object-acl" + ) + with self.mock_put(object_acl_config_update_url) as m: + object_storage_bucket = ObjectStorageBucket( + self.client, "example-bucket", "us-east-1" + ) + acl = object_storage_bucket.object_acl_config_update( + ObjectStorageACL.PUBLIC_READ, + "example", + ) + self.assertEqual(m.call_url, object_acl_config_update_url) + self.assertEqual(acl.acl, "public-read") + self.assertEqual( + acl.acl_xml, "..." + ) + self.assertEqual( + m.call_data, + { + "acl": "public-read", + "name": "example", + }, + ) + + def test_quota_get_and_list(self): + """ + Test that you can get and list an Object storage quota and usage information. + """ + quota = ObjectStorageQuota( + self.client, + "obj-objects-us-ord-1", + ) + + self.assertIsNotNone(quota) + self.assertEqual(quota.quota_id, "obj-objects-us-ord-1") + self.assertEqual(quota.quota_name, "Object Storage Maximum Objects") + self.assertEqual( + quota.description, + "Maximum number of Objects this customer is allowed to have on this endpoint.", + ) + self.assertEqual(quota.endpoint_type, "E1") + self.assertEqual(quota.s3_endpoint, "us-iad-1.linodeobjects.com") + self.assertEqual(quota.quota_limit, 50) + self.assertEqual(quota.resource_metric, "object") + + quota_usage_url = "/object-storage/quotas/obj-objects-us-ord-1/usage" + with self.mock_get(quota_usage_url) as m: + usage = quota.usage() + self.assertIsNotNone(usage) + self.assertEqual(m.call_url, quota_usage_url) + self.assertEqual(usage.quota_limit, 100) + self.assertEqual(usage.usage, 10) + + quota_list_url = "/object-storage/quotas" + with self.mock_get(quota_list_url) as m: + quotas = self.client.object_storage.quotas() + self.assertIsNotNone(quotas) + self.assertEqual(m.call_url, quota_list_url) + self.assertEqual(len(quotas), 2) + self.assertEqual(quotas[0].quota_id, "obj-objects-us-ord-1") + self.assertEqual( + quotas[0].quota_name, "Object Storage Maximum Objects" 
+ ) + self.assertEqual( + quotas[0].description, + "Maximum number of Objects this customer is allowed to have on this endpoint.", + ) + self.assertEqual(quotas[0].endpoint_type, "E1") + self.assertEqual( + quotas[0].s3_endpoint, "us-iad-1.linodeobjects.com" + ) + self.assertEqual(quotas[0].quota_limit, 50) + self.assertEqual(quotas[0].resource_metric, "object") diff --git a/test/unit/objects/placement_test.py b/test/unit/objects/placement_test.py new file mode 100644 index 000000000..08fcdc1e4 --- /dev/null +++ b/test/unit/objects/placement_test.py @@ -0,0 +1,79 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects import ( + MigratedInstance, + PlacementGroup, + PlacementGroupMember, +) + + +class PlacementTest(ClientBaseCase): + """ + Tests methods of the Placement Group + """ + + def test_get_placement_group(self): + """ + Tests that a Placement Group is loaded correctly by ID + """ + + pg = PlacementGroup(self.client, 123) + assert not pg._populated + + self.validate_pg_123(pg) + assert pg._populated + + def test_pg_assign(self): + """ + Tests that you can assign to a PG. + """ + + pg = PlacementGroup(self.client, 123) + assert not pg._populated + + with self.mock_post("/placement/groups/123") as m: + pg.assign([123], compliant_only=True) + + assert m.call_url == "/placement/groups/123/assign" + + # Ensure the PG state was populated + assert pg._populated + + self.assertEqual( + m.call_data, + {"linodes": [123], "compliant_only": True}, + ) + + def test_pg_unassign(self): + """ + Tests that you can unassign from a PG. 
+ """ + + pg = PlacementGroup(self.client, 123) + assert not pg._populated + + with self.mock_post("/placement/groups/123") as m: + pg.unassign([123]) + + assert m.call_url == "/placement/groups/123/unassign" + + # Ensure the PG state was populated + assert pg._populated + + self.assertEqual( + m.call_data, + {"linodes": [123]}, + ) + + def validate_pg_123(self, pg: PlacementGroup): + assert pg.id == 123 + assert pg.label == "test" + assert pg.region.id == "eu-west" + assert pg.placement_group_type == "anti_affinity:local" + assert pg.placement_group_policy == "strict" + assert pg.is_compliant + assert pg.members[0] == PlacementGroupMember( + linode_id=123, is_compliant=True + ) + assert pg.migrations.inbound[0] == MigratedInstance(linode_id=123) + assert pg.migrations.outbound[0] == MigratedInstance(linode_id=456) diff --git a/test/unit/objects/profile_test.py b/test/unit/objects/profile_test.py new file mode 100644 index 000000000..cbe8dabd7 --- /dev/null +++ b/test/unit/objects/profile_test.py @@ -0,0 +1,113 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Profile, ProfileLogin, SSHKey +from linode_api4.objects.profile import TrustedDevice + + +class SSHKeyTest(ClientBaseCase): + """ + Tests methods of the SSHKey class + """ + + def test_get_ssh_key(self): + """ + Tests that an SSHKey is loaded correctly by ID + """ + key = SSHKey(self.client, 22) + self.assertEqual(key._populated, False) + + self.assertEqual(key.label, "Home Ubuntu PC") + self.assertEqual(key._populated, True) + + self.assertEqual( + key.created, + datetime(year=2018, month=9, day=14, hour=13, minute=0, second=0), + ) + self.assertEqual(key.id, 22) + self.assertEqual( + key.ssh_key, + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98" + "ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD" + "eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO" + "TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i" + 
"muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo" + "F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC" + "MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy" + "nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz" + "/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz" + "tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v" + "TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH" + "d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b" + "cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI" + "CmhW7erNJNVxYjtzseGpBLmRRUTsT038w== dorthu@dorthu-command", + ) + + def test_update_ssh_key(self): + """ + Tests that an SSHKey can be updated + """ + key = SSHKey(self.client, 22) + + key.label = "New Label" + + with self.mock_put("profile/sshkeys/22") as m: + key.save() + + self.assertEqual(m.call_url, "/profile/sshkeys/22") + self.assertEqual(m.call_data, {"label": "New Label"}) + + def test_delete_ssh_key(self): + """ + Tests that and SSHKey can be deleted + """ + key = SSHKey(self.client, 22) + + with self.mock_delete() as m: + key.delete() + + self.assertEqual(m.call_url, "/profile/sshkeys/22") + + +class ProfileTest(ClientBaseCase): + """ + Tests methods of the Profile class + """ + + def test_get_profile(self): + """ + Tests that a Profile is loaded correctly by ID + """ + profile = Profile(self.client, "exampleUser") + + self.assertEqual(profile.username, "exampleUser") + self.assertEqual(profile.authentication_type, "password") + self.assertIsNotNone(profile.authorized_keys) + self.assertEqual(profile.email, "example-user@gmail.com") + self.assertTrue(profile.email_notifications) + self.assertFalse(profile.ip_whitelist_enabled) + self.assertEqual(profile.lish_auth_method, "keys_only") + self.assertIsNotNone(profile.referrals) + self.assertFalse(profile.restricted) + self.assertEqual(profile.timezone, "US/Eastern") + self.assertTrue(profile.two_factor_auth) + self.assertEqual(profile.uid, 1234) + self.assertEqual(profile.verified_phone_number, 
"+5555555555") + + def test_get_trusted_device(self): + device = TrustedDevice(self.client, 123) + + self.assertEqual(device.id, 123) + self.assertEqual( + device.user_agent, + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36 Vivaldi/2.1.1337.36\n", + ) + + def test_get_login(self): + login = ProfileLogin(self.client, 123) + + self.assertEqual(login.id, 123) + self.assertEqual(login.ip, "192.0.2.0") + self.assertEqual(login.status, "successful") + self.assertEqual(login.username, "example_user") + self.assertTrue(login.restricted) diff --git a/test/unit/objects/property_alias_test.py b/test/unit/objects/property_alias_test.py new file mode 100644 index 000000000..09efa0e7e --- /dev/null +++ b/test/unit/objects/property_alias_test.py @@ -0,0 +1,191 @@ +""" +Tests for Property alias_of functionality +""" + +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Base, Property + + +class PropertyAliasTest(ClientBaseCase): + """Test cases for Property alias_of parameter""" + + def test_alias_populate_from_json(self): + """Test that aliased properties are populated correctly from JSON""" + + class TestModel(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "service_class": Property(mutable=True, alias_of="class"), + "label": Property(mutable=True), + } + + json_data = { + "id": 123, + "class": "premium", + "label": "test-label", + } + + obj = TestModel(self.client, 123, json_data) + + # The aliased property should be set using the Python-friendly name + self.assertEqual(obj.service_class, "premium") + self.assertEqual(obj.label, "test-label") + + def test_alias_serialize(self): + """Test that aliased properties serialize back to original API names""" + + class TestModel(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "service_class": Property(mutable=True, alias_of="class"), + "label": 
Property(mutable=True), + } + + obj = TestModel(self.client, 123) + obj._set("service_class", "premium") + obj._set("label", "test-label") + obj._set("_populated", True) + + result = obj._serialize() + + # The serialized output should use the original API attribute name + self.assertIn("class", result) + self.assertEqual(result["class"], "premium") + self.assertEqual(result["label"], "test-label") + # Should not contain the aliased name + self.assertNotIn("service_class", result) + + def test_properties_with_alias(self): + """Test that properties_with_alias returns correct mapping""" + + class TestModel(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "service_class": Property(mutable=True, alias_of="class"), + "beta_type": Property(alias_of="type"), + "label": Property(mutable=True), + } + + obj = TestModel(self.client, 123) + + alias_map = obj.properties_with_alias + + # Should contain mappings for aliased properties + self.assertIn("class", alias_map) + self.assertIn("type", alias_map) + + # Should map to tuples of (alias_name, Property) + alias_name, prop = alias_map["class"] + self.assertEqual(alias_name, "service_class") + self.assertEqual(prop.alias_of, "class") + + alias_name, prop = alias_map["type"] + self.assertEqual(alias_name, "beta_type") + self.assertEqual(prop.alias_of, "type") + + # Non-aliased properties should not be in the map + self.assertNotIn("label", alias_map) + self.assertNotIn("id", alias_map) + + def test_alias_no_conflict_with_regular_properties(self): + """Test that aliased properties don't conflict with regular properties""" + + class TestModel(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "service_class": Property(mutable=True, alias_of="class"), + "label": Property(mutable=True), + "status": Property(), + } + + json_data = { + "id": 123, + "class": "premium", + "label": "test-label", + "status": "active", + } + + obj = TestModel(self.client, 123, 
json_data) + + # All properties should be set correctly + self.assertEqual(obj.service_class, "premium") + self.assertEqual(obj.label, "test-label") + self.assertEqual(obj.status, "active") + + def test_multiple_aliases(self): + """Test handling multiple aliased properties""" + + class TestModel(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "service_class": Property(mutable=True, alias_of="class"), + "beta_type": Property(mutable=True, alias_of="type"), + "import_data": Property(mutable=True, alias_of="import"), + } + + json_data = { + "id": 123, + "class": "premium", + "type": "beta", + "import": "data", + } + + obj = TestModel(self.client, 123, json_data) + + # All aliased properties should be populated + self.assertEqual(obj.service_class, "premium") + self.assertEqual(obj.beta_type, "beta") + self.assertEqual(obj.import_data, "data") + + # Serialization should use original names + obj._set("_populated", True) + result = obj._serialize() + + self.assertEqual(result["class"], "premium") + self.assertEqual(result["type"], "beta") + self.assertEqual(result["import"], "data") + + def test_alias_with_none_value(self): + """Test that aliased properties handle None values correctly""" + + class TestModel(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "service_class": Property(mutable=True, alias_of="class"), + } + + json_data = { + "id": 123, + "class": None, + } + + obj = TestModel(self.client, 123, json_data) + + # The aliased property should be None + self.assertIsNone(obj.service_class) + + def test_alias_cached_property(self): + """Test that properties_with_alias is cached""" + + class TestModel(Base): + api_endpoint = "/test/{id}" + properties = { + "id": Property(identifier=True), + "service_class": Property(alias_of="class"), + } + + obj = TestModel(self.client, 123) + + # Access the cached property twice + result1 = obj.properties_with_alias + result2 = 
obj.properties_with_alias + + # Should return the same object (cached) + self.assertIs(result1, result2) diff --git a/test/unit/objects/region_test.py b/test/unit/objects/region_test.py new file mode 100644 index 000000000..7bc3ae9f8 --- /dev/null +++ b/test/unit/objects/region_test.py @@ -0,0 +1,63 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Region + + +class RegionTest(ClientBaseCase): + """ + Tests methods of the Region class + """ + + def test_get_region(self): + """ + Tests that a Region is loaded correctly by ID + """ + region = Region(self.client, "us-east") + + self.assertEqual(region.id, "us-east") + self.assertEqual(region.country, "us") + self.assertEqual(region.label, "label7") + self.assertEqual(region.status, "ok") + self.assertIsNotNone(region.resolvers) + self.assertEqual(region.site_type, "core") + self.assertEqual( + region.placement_group_limits.maximum_pgs_per_customer, 5 + ) + self.assertEqual( + region.placement_group_limits.maximum_linodes_per_pg, 5 + ) + + # Test monitors section + self.assertIsNotNone(region.monitors) + self.assertEqual(region.monitors.alerts, ["Managed Databases"]) + self.assertEqual(region.monitors.metrics, ["Managed Databases"]) + + self.assertIsNotNone(region.capabilities) + self.assertIn("Linode Interfaces", region.capabilities) + + def test_region_availability(self): + """ + Tests that availability for a specific region can be listed and filtered on. + """ + avail_entries = Region(self.client, "us-east").availability + + for entry in avail_entries: + assert entry.region is not None + assert len(entry.region) > 0 + + assert entry.plan is not None + assert len(entry.plan) > 0 + + assert entry.available is not None + + def test_region_vpc_availability(self): + """ + Tests that VPC availability for a specific region can be retrieved. 
+ """ + vpc_avail = Region(self.client, "us-east").vpc_availability + + assert vpc_avail is not None + assert vpc_avail.region == "us-east" + assert vpc_avail.available is True + assert vpc_avail.available_ipv6_prefix_lengths is not None + assert isinstance(vpc_avail.available_ipv6_prefix_lengths, list) diff --git a/test/unit/objects/serializable_test.py b/test/unit/objects/serializable_test.py new file mode 100644 index 000000000..f7dff4297 --- /dev/null +++ b/test/unit/objects/serializable_test.py @@ -0,0 +1,110 @@ +from dataclasses import dataclass +from test.unit.base import ClientBaseCase +from typing import Optional, Union + +from linode_api4 import Base, ExplicitNullValue, JSONObject, Property + + +class JSONObjectTest(ClientBaseCase): + def test_serialize_optional(self): + @dataclass + class Foo(JSONObject): + always_include = {"foo"} + + foo: Optional[str] = None + bar: Optional[str] = None + baz: str = None + foobar: Union[str, ExplicitNullValue, None] = None + + foo = Foo().dict + + assert foo["foo"] is None + assert "bar" not in foo + assert foo["baz"] is None + assert "foobar" not in foo + + foo = Foo(foo="test", bar="test2", baz="test3", foobar="test4").dict + + assert foo["foo"] == "test" + assert foo["bar"] == "test2" + assert foo["baz"] == "test3" + assert foo["foobar"] == "test4" + + def test_serialize_optional_include_None(self): + @dataclass + class Foo(JSONObject): + include_none_values = True + + foo: Optional[str] = None + bar: Optional[str] = None + baz: str = None + foobar: Union[str, ExplicitNullValue, None] = None + + foo = Foo().dict + + assert foo["foo"] is None + assert foo["bar"] is None + assert foo["baz"] is None + assert foo["foobar"] is None + + foo = Foo( + foo="test", bar="test2", baz="test3", foobar=ExplicitNullValue() + ).dict + + assert foo["foo"] == "test" + assert foo["bar"] == "test2" + assert foo["baz"] == "test3" + assert foo["foobar"] is None + + def test_serialize_put_class(self): + """ + Ensures that the JSONObject 
put_class ClassVar functions as expected. + """ + + @dataclass + class SubStructOptions(JSONObject): + test1: Optional[str] = None + + @dataclass + class SubStruct(JSONObject): + put_class = SubStructOptions + + test1: str = "" + test2: int = 0 + + class Model(Base): + api_endpoint = "/foo/bar" + + properties = { + "id": Property(identifier=True), + "substruct": Property(mutable=True, json_object=SubStruct), + } + + mock_response = { + "id": 123, + "substruct": { + "test1": "abc", + "test2": 321, + }, + } + + with self.mock_get(mock_response) as mock: + obj = self.client.load(Model, 123) + + assert mock.called + + assert obj.id == 123 + assert obj.substruct.test1 == "abc" + assert obj.substruct.test2 == 321 + + obj.substruct.test1 = "cba" + + with self.mock_put(mock_response) as mock: + obj.save() + + assert mock.called + assert mock.call_data == { + "substruct": { + "test1": "cba", + } + } diff --git a/test/unit/objects/support_test.py b/test/unit/objects/support_test.py new file mode 100644 index 000000000..0c1ac346a --- /dev/null +++ b/test/unit/objects/support_test.py @@ -0,0 +1,28 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects import SupportTicket + + +class SupportTest(ClientBaseCase): + """ + Tests methods of the SupportTicket class + """ + + def test_get_support_ticket(self): + ticket = SupportTicket(self.client, 123) + + self.assertIsNotNone(ticket.attachments) + self.assertFalse(ticket.closable) + self.assertIsNotNone(ticket.entity) + self.assertEqual(ticket.gravatar_id, "474a1b7373ae0be4132649e69c36ce30") + self.assertEqual(ticket.id, 123) + self.assertEqual(ticket.opened_by, "some_user") + self.assertEqual(ticket.status, "open") + self.assertEqual(ticket.updated_by, "some_other_user") + + def test_support_ticket_close(self): + ticket = SupportTicket(self.client, 123) + + with self.mock_post({}) as m: + ticket.support_ticket_close() + self.assertEqual(m.call_url, "/support/tickets/123/close") diff --git 
a/test/unit/objects/tag_test.py b/test/unit/objects/tag_test.py new file mode 100644 index 000000000..137d11deb --- /dev/null +++ b/test/unit/objects/tag_test.py @@ -0,0 +1,46 @@ +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Tag + + +class TagTest(ClientBaseCase): + """ + Tests methods of the Tag class + """ + + def test_get_tag(self): + """ + Tests that Tag is loaded correctly by label + """ + tag = Tag(self.client, "something") + + self.assertEqual(tag.label, "something") + self.assertFalse(hasattr(tag, "_raw_objects")) + + def test_load_tag(self): + """ + Tests that the LinodeClient can load a tag + """ + tag = self.client.load(Tag, "something") + + self.assertEqual(tag.label, "something") + self.assertTrue(hasattr(tag, "_raw_objects")) # got the raw objects + print(tag._raw_objects) + + # objects loaded up right + self.assertEqual(len(tag.objects), 1) + self.assertEqual(tag.objects[0].id, 123) + self.assertEqual(tag.objects[0].label, "linode123") + self.assertEqual(tag.objects[0].tags, ["something"]) + + def test_delete_tag(self): + """ + Tests that you can delete a tag + """ + with self.mock_delete() as m: + tag = Tag(self.client, "nothing") + result = tag.delete() + + self.assertEqual(result, True) + + self.assertEqual(m.call_url, "/tags/nothing") diff --git a/test/unit/objects/volume_test.py b/test/unit/objects/volume_test.py new file mode 100644 index 000000000..1344c2b94 --- /dev/null +++ b/test/unit/objects/volume_test.py @@ -0,0 +1,100 @@ +from datetime import datetime +from test.unit.base import ClientBaseCase + +from linode_api4.objects import Volume + + +class VolumeTest(ClientBaseCase): + """ + Tests methods of the Volume class + """ + + def test_get_volume(self): + """ + Tests that a volume is loaded correctly by ID + """ + volume = Volume(self.client, 1) + self.assertEqual(volume._populated, False) + + self.assertEqual(volume.label, "block1") + self.assertEqual(volume._populated, True) + + 
self.assertEqual(volume.size, 40) + self.assertEqual(volume.linode, None) + self.assertEqual(volume.status, "active") + self.assertIsInstance(volume.updated, datetime) + self.assertEqual(volume.region.id, "us-east-1a") + + assert volume.tags == ["something"] + + self.assertEqual(volume.filesystem_path, "this/is/a/file/path") + self.assertEqual(volume.hardware_type, "hdd") + self.assertEqual(volume.linode_label, None) + + def test_get_volume_with_encryption(self): + volume = Volume(self.client, 4) + self.assertEqual(volume.encryption, "enabled") + + def test_update_volume_tags(self): + """ + Tests that updating tags on an entity send the correct request + """ + volume = self.client.volumes().first() + + with self.mock_put("volumes/1") as m: + volume.tags = ["test1", "test2"] + volume.save() + + assert m.call_url == "/volumes/{}".format(volume.id) + assert m.call_data["tags"] == ["test1", "test2"] + + def test_clone_volume(self): + """ + Tests that cloning a volume returns new volume object with + same region and the given label + """ + volume_to_clone = self.client.volumes().first() + + with self.mock_post(f"volumes/{volume_to_clone.id}") as mock: + new_volume = volume_to_clone.clone("new-volume") + assert mock.call_url == f"/volumes/{volume_to_clone.id}/clone" + self.assertEqual( + str(new_volume.region), + str(volume_to_clone.region), + "the regions should be the same", + ) + assert new_volume.id != str(volume_to_clone.id) + + def test_resize_volume(self): + """ + Tests that resizing a given volume volume works + """ + volume = self.client.volumes().first() + + with self.mock_post(f"volumes/{volume.id}") as mock: + volume.resize(3048) + assert mock.call_url == f"/volumes/{volume.id}/resize" + assert str(mock.call_data["size"]) == "3048" + + def test_detach_volume(self): + """ + Tests that detaching the volume succeeds + """ + volume = self.client.volumes()[2] + + with self.mock_post(f"volumes/{volume.id}") as mock: + result = volume.detach() + assert mock.call_url 
== f"/volumes/{volume.id}/detach" + assert result is True + + def test_attach_volume_to_linode(self): + """ + Tests that the given volume attaches to the Linode via id + """ + volume = self.client.volumes().first() + + with self.mock_post(f"volumes/{volume.id}") as mock: + result = volume.attach(1) + assert mock.call_url == f"/volumes/{volume.id}/attach" + assert result is True + assert str(mock.call_data["linode_id"]) == "1" diff --git a/test/unit/objects/vpc_test.py b/test/unit/objects/vpc_test.py new file mode 100644 index 000000000..90ec348da --- /dev/null +++ b/test/unit/objects/vpc_test.py @@ -0,0 +1,174 @@ +import datetime +from test.unit.base import ClientBaseCase + +from linode_api4 import DATE_FORMAT, VPC, VPCSubnet + + +class VPCTest(ClientBaseCase): + """ + Tests methods of the VPC Group + """ + + def test_get_vpc(self): + """ + Tests that a VPC is loaded correctly by ID + """ + + vpc = VPC(self.client, 123456) + self.assertEqual(vpc._populated, False) + + self.validate_vpc_123456(vpc) + self.assertEqual(vpc._populated, True) + + def test_list_vpcs(self): + """ + Tests that you can list VPCs. + """ + + vpcs = self.client.vpcs() + + self.validate_vpc_123456(vpcs[0]) + self.assertEqual(vpcs[0]._populated, True) + + def test_get_subnet(self): + """ + Tests that you can list VPCs. + """ + + subnet = VPCSubnet(self.client, 789, 123456) + + self.assertEqual(subnet._populated, False) + + self.validate_vpc_subnet_789(subnet) + self.assertEqual(subnet._populated, True) + self.assertEqual(subnet.linodes[0].id, 12345) + self.assertEqual(subnet.linodes[0].interfaces[0].id, 678) + self.assertEqual(len(subnet.linodes[0].interfaces), 2) + self.assertEqual(subnet.linodes[0].interfaces[1].active, False) + + def test_list_subnets(self): + """ + Tests that you can list VPCs. + """ + + subnets = self.client.vpcs()[0].subnets + + self.validate_vpc_subnet_789(subnets[0]) + + def test_create_subnet(self): + """ + Tests that you can create a subnet. 
+ """ + + with self.mock_post("/vpcs/123456/subnets/789") as m: + vpc = VPC(self.client, 123456) + subnet = vpc.subnet_create("test-subnet", "10.0.0.0/24") + + self.assertEqual(m.call_url, "/vpcs/123456/subnets") + + self.assertEqual( + m.call_data, + { + "label": "test-subnet", + "ipv4": "10.0.0.0/24", + }, + ) + + self.validate_vpc_subnet_789(subnet) + + def test_list_ips(self): + """ + Validates that all VPC IPs can be listed. + """ + + with self.mock_get("/vpcs/ips") as m: + result = self.client.vpcs.ips() + + assert m.call_url == "/vpcs/ips" + assert len(result) == 1 + + ip = result[0] + assert ip.address == "10.0.0.2" + assert ip.address_range is None + assert ip.vpc_id == 123 + assert ip.subnet_id == 456 + assert ip.region == "us-mia" + assert ip.linode_id == 123 + assert ip.config_id == 456 + assert ip.interface_id == 789 + assert ip.active + assert ip.nat_1_1 == "172.233.179.133" + assert ip.gateway == "10.0.0.1" + assert ip.prefix == 24 + assert ip.subnet_mask == "255.255.255.0" + + def validate_vpc_123456(self, vpc: VPC): + expected_dt = datetime.datetime.strptime( + "2018-01-01T00:01:01", DATE_FORMAT + ) + + self.assertEqual(vpc.label, "test-vpc") + self.assertEqual(vpc.description, "A very real VPC.") + self.assertEqual(vpc.region.id, "us-southeast") + self.assertEqual(vpc.created, expected_dt) + self.assertEqual(vpc.updated, expected_dt) + + self.assertEqual(vpc.ipv6[0].range, "fd71:1140:a9d0::/52") + + def validate_vpc_subnet_789(self, subnet: VPCSubnet): + expected_dt = datetime.datetime.strptime( + "2018-01-01T00:01:01", DATE_FORMAT + ) + + assert subnet.label == "test-subnet" + assert subnet.ipv4 == "10.0.0.0/24" + assert subnet.linodes[0].id == 12345 + assert subnet.created == expected_dt + assert subnet.updated == expected_dt + + assert subnet.databases[0].id == 12345 + assert subnet.databases[0].ipv4_range == "10.0.0.0/24" + assert subnet.databases[0].ipv6_ranges == ["2001:db8::/64"] + + assert subnet.linodes[0].interfaces[0].id == 678 + assert 
subnet.linodes[0].interfaces[0].active + assert subnet.linodes[0].interfaces[0].config_id is None + + assert subnet.linodes[0].interfaces[1].id == 543 + assert not subnet.linodes[0].interfaces[1].active + assert subnet.linodes[0].interfaces[1].config_id is None + + self.assertEqual(subnet.ipv6[0].range, "fd71:1140:a9d0::/52") + + def test_list_vpc_ips(self): + """ + Test that the ips under a specific VPC can be listed. + """ + vpc = VPC(self.client, 123456) + vpc_ips = vpc.ips + + self.assertGreater(len(vpc_ips), 0) + + vpc_ip = vpc_ips[0] + + self.assertEqual(vpc_ip.vpc_id, vpc.id) + self.assertEqual(vpc_ip.address, "10.0.0.2") + self.assertEqual(vpc_ip.address_range, None) + self.assertEqual(vpc_ip.subnet_id, 654321) + self.assertEqual(vpc_ip.region, "us-ord") + self.assertEqual(vpc_ip.linode_id, 111) + self.assertEqual(vpc_ip.config_id, 222) + self.assertEqual(vpc_ip.interface_id, 333) + self.assertEqual(vpc_ip.active, True) + self.assertEqual(vpc_ip.nat_1_1, None) + self.assertEqual(vpc_ip.gateway, "10.0.0.1") + self.assertEqual(vpc_ip.prefix, 8) + self.assertEqual(vpc_ip.subnet_mask, "255.0.0.0") + + vpc_ip_2 = vpc_ips[2] + + self.assertEqual(vpc_ip_2.ipv6_range, "fd71:1140:a9d0::/52") + self.assertEqual(vpc_ip_2.ipv6_is_public, True) + self.assertEqual( + vpc_ip_2.ipv6_addresses[0].slaac_address, "fd71:1140:a9d0::/52" + ) diff --git a/test/unit/paginated_list_test.py b/test/unit/paginated_list_test.py new file mode 100644 index 000000000..2d6705561 --- /dev/null +++ b/test/unit/paginated_list_test.py @@ -0,0 +1,136 @@ +from unittest import TestCase +from unittest.mock import MagicMock, call + +from linode_api4.paginated_list import PaginatedList + + +class PaginationSlicingTest(TestCase): + def setUp(self): + """ + Creates sample mocked lists for use in the test cases + """ + self.normal_list = list(range(25)) + self.paginated_list = PaginatedList( + None, None, page=self.normal_list, total_items=25 + ) + + def test_slice_normal(self): + """ + Tests that 
bounded, forward slices work as expected + """ + slices = ((1, 10), (10, 20), (5, 25), (0, 10)) + + for start, stop in slices: + self.assertEqual( + self.normal_list[start:stop], self.paginated_list[start:stop] + ) + + def test_slice_negative(self): + """ + Tests that negative indexing works in slices + """ + slices = ((-10, -5), (-20, 20), (3, -10)) + + for start, stop in slices: + self.assertEqual( + self.normal_list[start:stop], self.paginated_list[start:stop] + ) + + def test_slice_no_lower_bound(self): + """ + Tests that slices without lower bounds work + """ + self.assertEqual(self.normal_list[:5], self.paginated_list[:5]) + + def test_slice_no_upper_bound(self): + """ + Tests that slices without upper bounds work + """ + self.assertEqual(self.normal_list[5:], self.paginated_list[5:]) + + def test_slice_boundless(self): + """ + Tests that unbound slices work + """ + self.assertEqual(self.normal_list[:], self.paginated_list[:]) + + def test_slice_bad_negative_index(self): + """ + Tests that an IndexError is raised when a bad negative index is given + """ + with self.assertRaises(IndexError): + self.paginated_list[:-30] + + def test_slice_bad_index(self): + """ + Tests that out of bounds indexes in slices work + """ + self.assertEqual(self.normal_list[30:], self.paginated_list[30:]) + + def test_slice_unsupported_step(self): + """ + Tests that steps outside of 1 raise a NotImplementedError + """ + for step in (-1, 0, 2, 3): + with self.assertRaises(NotImplementedError): + self.paginated_list[::step] + + def test_slice_backward_indexing(self): + """ + Tests that backwards indexing works as expected + """ + self.assertEqual(self.normal_list[10:5], self.paginated_list[10:5]) + + +class TestModel: + """ + This is a test model class used to simulate an actual model that would be + returned by the API + """ + + @classmethod + def make_instance(*args, **kwargs): + return TestModel() + + +class PageLoadingTest(TestCase): + def test_page_size_in_request(self): + """ + 
Tests that the correct page_size is added to requests when loading subsequent pages + """ + + for i in (25, 100, 500): + # these are the pages we're sending in to the mocked list + first_page = [TestModel() for x in range(i)] + second_page = { + "data": [{"id": 1}], + "pages": 2, + "page": 2, + "results": i + 1, + } + + # our mock client to intercept the requests and return the mocked info + client = MagicMock() + client.get = MagicMock(return_value=second_page) + + # let's do it! + p = PaginatedList( + client, "/test", page=first_page, max_pages=2, total_items=i + 1 + ) + p[i] # load second page + + # and we called the next page URL with the correct page_size + assert client.get.call_args == call( + "//test?page=2&page_size={}".format(i), filters=None + ) + + def test_no_pages(self): + """ + Tests that this library correctly handles paginated lists with no data, such + as if a paginated endpoint is given a filter that matches nothing. + """ + client = MagicMock() + + p = PaginatedList(client, "/test", page=[], max_pages=0, total_items=0) + + assert len(p) == 0 diff --git a/test/unit/util_test.py b/test/unit/util_test.py new file mode 100644 index 000000000..35adf38ff --- /dev/null +++ b/test/unit/util_test.py @@ -0,0 +1,174 @@ +import unittest + +from linode_api4.util import drop_null_keys, generate_device_suffixes + + +class UtilTest(unittest.TestCase): + """ + Tests for utility functions. + """ + + def test_drop_null_keys_nonrecursive(self): + """ + Tests whether a non-recursive drop_null_keys call works as expected. + """ + value = { + "foo": "bar", + "test": None, + "cool": { + "test": "bar", + "cool": None, + }, + } + + expected_output = {"foo": "bar", "cool": {"test": "bar", "cool": None}} + + assert drop_null_keys(value, recursive=False) == expected_output + + def test_drop_null_keys_recursive(self): + """ + Tests whether a recursive drop_null_keys call works as expected. 
+ """ + + value = { + "foo": "bar", + "test": None, + "cool": { + "test": "bar", + "cool": None, + "list": [{"foo": "bar", "test": None}], + }, + } + + expected_output = { + "foo": "bar", + "cool": { + "test": "bar", + "list": [ + { + "foo": "bar", + } + ], + }, + } + + assert drop_null_keys(value) == expected_output + + def test_generate_device_suffixes(self): + """ + Tests whether generate_device_suffixes works as expected. + """ + + expected_output_12 = [ + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + ] + assert generate_device_suffixes(12) == expected_output_12 + + expected_output_30 = [ + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "aa", + "ab", + "ac", + "ad", + ] + assert generate_device_suffixes(30) == expected_output_30 + + expected_output_60 = [ + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "aa", + "ab", + "ac", + "ad", + "ae", + "af", + "ag", + "ah", + "ai", + "aj", + "ak", + "al", + "am", + "an", + "ao", + "ap", + "aq", + "ar", + "as", + "at", + "au", + "av", + "aw", + "ax", + "ay", + "az", + "ba", + "bb", + "bc", + "bd", + "be", + "bf", + "bg", + "bh", + ] + assert generate_device_suffixes(60) == expected_output_60 diff --git a/tox.ini b/tox.ini new file mode 100644 index 000000000..266c26717 --- /dev/null +++ b/tox.ini @@ -0,0 +1,17 @@ +[tox] +envlist = py38,py39,py310,py311,py312 +skip_missing_interpreters = true + +[testenv] +deps = + pytest + coverage + mock + pylint + httpretty + pytest-rerunfailures +commands = + python -m pip install . + coverage run --source linode_api4 -m pytest test/unit + coverage report + pylint linode_api4