diff --git a/.env.example b/.env.example
new file mode 100644
index 00000000..8608f2d0
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,6 @@
+#! /bin/sh
+
+export DIGITALOCEAN_ACCESS_TOKEN=replace_me
+export GRADIENT_MODEL_ACCESS_KEY=replace_me
+export GRADIENT_AGENT_ACCESS_KEY=replace_me
+export GRADIENT_AGENT_ENDPOINT=https://your-agent-subdomain.agents.do-ai.run
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 88980c93..d53cf87b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -2,27 +2,32 @@ name: CI
on:
push:
branches-ignore:
- - 'generated'
- - 'codegen/**'
- - 'integrated/**'
- - 'stl-preview-head/**'
- - 'stl-preview-base/**'
+ - "generated"
+ - "codegen/**"
+ - "integrated/**"
+ - "stl-preview-head/**"
+ - "stl-preview-base/**"
+ pull_request:
+ branches-ignore:
+ - "stl-preview-head/**"
+ - "stl-preview-base/**"
jobs:
lint:
timeout-minutes: 10
name: lint
- runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install Rye
run: |
curl -sSf https://rye.astral.sh/get | bash
- echo "$HOME/.rye/shims" >> $GITHUB_PATH
+ echo "$HOME/.rye/shims" >> "$GITHUB_PATH"
env:
- RYE_VERSION: '0.44.0'
- RYE_INSTALL_OPTION: '--yes'
+ RYE_VERSION: "0.44.0"
+ RYE_INSTALL_OPTION: "--yes"
- name: Install dependencies
run: rye sync --all-features
@@ -30,24 +35,44 @@ jobs:
- name: Run lints
run: ./scripts/lint
- upload:
- if: github.repository == 'stainless-sdks/digitalocean-genai-sdk-python'
+ build:
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
timeout-minutes: 10
- name: upload
+ name: build
permissions:
contents: read
id-token: write
- runs-on: depot-ubuntu-24.04
+ runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
+
+ - name: Install Rye
+ run: |
+ curl -sSf https://rye.astral.sh/get | bash
+ echo "$HOME/.rye/shims" >> "$GITHUB_PATH"
+ env:
+ RYE_VERSION: "0.44.0"
+ RYE_INSTALL_OPTION: "--yes"
+
+ - name: Install dependencies
+ run: rye sync --all-features
+
+ - name: Run build
+ run: rye build
- name: Get GitHub OIDC Token
+ if: |-
+ github.repository == 'stainless-sdks/gradient-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
id: github-oidc
- uses: actions/github-script@v6
+ uses: actions/github-script@v8
with:
script: core.setOutput('github_token', await core.getIDToken());
- name: Upload tarball
+ if: |-
+ github.repository == 'stainless-sdks/gradient-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
@@ -57,20 +82,49 @@ jobs:
test:
timeout-minutes: 10
name: test
- runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install Rye
run: |
curl -sSf https://rye.astral.sh/get | bash
- echo "$HOME/.rye/shims" >> $GITHUB_PATH
+ echo "$HOME/.rye/shims" >> "$GITHUB_PATH"
env:
- RYE_VERSION: '0.44.0'
- RYE_INSTALL_OPTION: '--yes'
+ RYE_VERSION: "0.44.0"
+ RYE_INSTALL_OPTION: "--yes"
- name: Bootstrap
run: ./scripts/bootstrap
- name: Run tests
run: ./scripts/test
+
+ smoke:
+ name: smoke
+ # Only run smoke tests on pushes to main repo (not forks) so that secrets can be accessed
+ if: github.repository == 'stainless-sdks/gradient-python' && (github.event_name == 'push' || github.event_name == 'pull_request')
+ runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ timeout-minutes: 10
+ steps:
+      - uses: actions/checkout@v6
+
+ - name: Install Rye
+ run: |
+ curl -sSf https://rye.astral.sh/get | bash
+ echo "$HOME/.rye/shims" >> "$GITHUB_PATH"
+ env:
+ RYE_VERSION: "0.44.0"
+ RYE_INSTALL_OPTION: "--yes"
+
+ - name: Bootstrap
+ run: ./scripts/bootstrap
+
+ - name: Run smoke tests
+ env:
+ DIGITALOCEAN_ACCESS_TOKEN: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
+ GRADIENT_MODEL_ACCESS_KEY: ${{ secrets.GRADIENT_MODEL_ACCESS_KEY }}
+ GRADIENT_AGENT_ACCESS_KEY: ${{ secrets.GRADIENT_AGENT_ACCESS_KEY }}
+ GRADIENT_AGENT_ENDPOINT: ${{ secrets.GRADIENT_AGENT_ENDPOINT }}
+ run: ./scripts/smoke
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
new file mode 100644
index 00000000..2b8edb2f
--- /dev/null
+++ b/.github/workflows/publish-pypi.yml
@@ -0,0 +1,31 @@
+# This workflow is triggered when a GitHub release is created.
+# It can also be run manually to re-publish to PyPI in case it failed for some reason.
+# You can run this workflow by navigating to https://www.github.com/digitalocean/gradient-python/actions/workflows/publish-pypi.yml
+name: Publish PyPI
+on:
+ workflow_dispatch:
+
+ release:
+ types: [published]
+
+jobs:
+ publish:
+ name: publish
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Install Rye
+ run: |
+ curl -sSf https://rye.astral.sh/get | bash
+ echo "$HOME/.rye/shims" >> "$GITHUB_PATH"
+ env:
+ RYE_VERSION: '0.44.0'
+ RYE_INSTALL_OPTION: '--yes'
+
+ - name: Publish to PyPI
+ run: |
+ bash ./bin/publish-pypi
+ env:
+ PYPI_TOKEN: ${{ secrets.GRADIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
new file mode 100644
index 00000000..bde3b3ed
--- /dev/null
+++ b/.github/workflows/release-doctor.yml
@@ -0,0 +1,21 @@
+name: Release Doctor
+on:
+ pull_request:
+ branches:
+ - main
+ workflow_dispatch:
+
+jobs:
+ release_doctor:
+ name: release doctor
+ runs-on: ubuntu-latest
+ if: github.repository == 'digitalocean/gradient-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Check release environment
+ run: |
+ bash ./bin/check-release-environment
+ env:
+ PYPI_TOKEN: ${{ secrets.GRADIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 87797408..95ceb189 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
.prism.log
-.vscode
_dev
__pycache__
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
new file mode 100644
index 00000000..faa0df4e
--- /dev/null
+++ b/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "3.12.0"
+}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 84a850f9..8fe2fd00 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 126
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-bdf24159c6ebb5402d6c05a5165cb1501dc37cf6c664baa9eb318efb0f89dddd.yml
-openapi_spec_hash: 686329a97002025d118dc2367755c18d
-config_hash: 39a1554af43cd406e37b5ed5c943649c
+configured_endpoints: 193
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-24bdeb83fbe576e4dd92498321be977e820fd755babee77eb8ff419a6229b22f.yml
+openapi_spec_hash: f1faf4e4e0fc37e518fe55ff7c01086d
+config_hash: 3f968a57adb20643373c134efc9af01a
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..5b010307
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+  "python.analysis.importFormat": "relative"
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..845ea31c
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,725 @@
+# Changelog
+
+## 3.12.0 (2026-03-13)
+
+Full Changelog: [v3.11.0...v3.12.0](https://github.com/digitalocean/gradient-python/compare/v3.11.0...v3.12.0)
+
+### Features
+
+* **api:** manual updates ([e139299](https://github.com/digitalocean/gradient-python/commit/e13929984bbfa991d51a56946a40380a909f1d75))
+* **api:** responses ([3277876](https://github.com/digitalocean/gradient-python/commit/32778769c86140d283a9f456568f68803208ae62))
+* **api:** update urls for endpoints ([2d4da01](https://github.com/digitalocean/gradient-python/commit/2d4da01e55e7f7a96aea98232ef05b28ebbcbada))
+* **client:** add support for binary request streaming ([f9706f4](https://github.com/digitalocean/gradient-python/commit/f9706f4b88e6278a5be1057c30a8939fed415767))
+
+
+### Chores
+
+* format all `api.md` files ([e103720](https://github.com/digitalocean/gradient-python/commit/e103720daa6eaae0454cbb857d97b1b2e43b405f))
+* **internal:** add request options to SSE classes ([17b88d9](https://github.com/digitalocean/gradient-python/commit/17b88d90dafe6a1d9db3f4ea15f17c7cdf838f81))
+* **internal:** codegen related update ([6f506cb](https://github.com/digitalocean/gradient-python/commit/6f506cb1dae5679b5ed992822658f333e382340e))
+* **internal:** codegen related update ([56acb2e](https://github.com/digitalocean/gradient-python/commit/56acb2e47da30e09ab6cbf42c285bf6480a376fe))
+* **internal:** codegen related update ([865fb4c](https://github.com/digitalocean/gradient-python/commit/865fb4cadfe0ce791ecc4ad38f50ce25b00477ee))
+* **internal:** make `test_proxy_environment_variables` more resilient ([09cfd99](https://github.com/digitalocean/gradient-python/commit/09cfd991d0442077c4f66b47f4d9da34f608692d))
+* **internal:** make `test_proxy_environment_variables` more resilient to env ([f3aa13b](https://github.com/digitalocean/gradient-python/commit/f3aa13b09ca2c1f565cc3277530bfa1775d02dd0))
+* **internal:** remove mock server code ([f774a64](https://github.com/digitalocean/gradient-python/commit/f774a64d51891a10e3d30ddaafe556afdf4502e0))
+* update mock server docs ([fa4da88](https://github.com/digitalocean/gradient-python/commit/fa4da887118bdf9462d74328a81a61dc4c4c30e9))
+
+## 3.11.0 (2026-02-17)
+
+Full Changelog: [v3.10.1...v3.11.0](https://github.com/digitalocean/gradient-python/compare/v3.10.1...v3.11.0)
+
+### Features
+
+* **api:** api update ([f67228f](https://github.com/digitalocean/gradient-python/commit/f67228f0bd8de96342d15b1b7c3096e4b03c7aaa))
+
+
+### Bug Fixes
+
+* **client:** loosen auth header validation ([bf5fa9f](https://github.com/digitalocean/gradient-python/commit/bf5fa9f93b6443ac0b41d5fa134aab8ee12cfbc3))
+
+
+### Chores
+
+* **internal:** codegen related update ([07eeda8](https://github.com/digitalocean/gradient-python/commit/07eeda8ae3e1c7bdfe3fcc51a8786f35682f4c3f))
+
+## 3.10.1 (2025-12-19)
+
+Full Changelog: [v3.10.0...v3.10.1](https://github.com/digitalocean/gradient-python/compare/v3.10.0...v3.10.1)
+
+### Bug Fixes
+
+* restore inference base urls ([#122](https://github.com/digitalocean/gradient-python/issues/122)) ([2a1c687](https://github.com/digitalocean/gradient-python/commit/2a1c687b46192afa210c3d159f3e087d3fe1a3b9))
+
+## 3.10.0 (2025-12-19)
+
+Full Changelog: [v3.9.0...v3.10.0](https://github.com/digitalocean/gradient-python/compare/v3.9.0...v3.10.0)
+
+### Features
+
+* **api:** manual updates ([f1c2eb2](https://github.com/digitalocean/gradient-python/commit/f1c2eb25ae1787b661ab1323528077074aa0cab6))
+* **api:** manual updates ([355e13f](https://github.com/digitalocean/gradient-python/commit/355e13f1a4b012e09bc2056179419ede57044b97))
+
+
+### Bug Fixes
+
+* restore inference endpoints ([#120](https://github.com/digitalocean/gradient-python/issues/120)) ([ee792a1](https://github.com/digitalocean/gradient-python/commit/ee792a181e819d8fa26712fe8bc96ffd4c02d2ed))
+
+
+### Chores
+
+* **internal:** add `--fix` argument to lint script ([2825cb7](https://github.com/digitalocean/gradient-python/commit/2825cb750edd261a324c2da28afc3cb6ee90f5e9))
+* run smoke tests on prs ([#121](https://github.com/digitalocean/gradient-python/issues/121)) ([719a5fb](https://github.com/digitalocean/gradient-python/commit/719a5fb4fcf418db9ede5659710377a47d41b6a8))
+
+## 3.9.0 (2025-12-17)
+
+Full Changelog: [v3.8.0...v3.9.0](https://github.com/digitalocean/gradient-python/compare/v3.8.0...v3.9.0)
+
+### Features
+
+* **api:** update via SDK Studio ([4173864](https://github.com/digitalocean/gradient-python/commit/4173864db71088fb5a2e3fc8033462580bb66603))
+* **api:** update via SDK Studio ([f6b12b8](https://github.com/digitalocean/gradient-python/commit/f6b12b8a67014dd608d8260c056d1c75342edda6))
+* **api:** update via SDK Studio ([a9cd7d3](https://github.com/digitalocean/gradient-python/commit/a9cd7d3bc6e2e988901e31064a4e607059c7ac09))
+
+
+### Bug Fixes
+
+* ensure streams are always closed ([80881b5](https://github.com/digitalocean/gradient-python/commit/80881b5248ac8baa2b34043df1d20086f319d2d1))
+* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([b400d38](https://github.com/digitalocean/gradient-python/commit/b400d3808dc93924d7d44b25714bb53ef220bfe8))
+* use async_to_httpx_files in patch method ([33d2306](https://github.com/digitalocean/gradient-python/commit/33d2306ee7211b7180ab156697159b9aa02d564e))
+
+
+### Chores
+
+* add missing docstrings ([9ac1364](https://github.com/digitalocean/gradient-python/commit/9ac136400dbd411b3d2177d20b255b0572861c48))
+* add Python 3.14 classifier and testing ([db08b3f](https://github.com/digitalocean/gradient-python/commit/db08b3fb9a7d07ff02a8d45804647ce7c1e34e5a))
+* **deps:** mypy 1.18.1 has a regression, pin to 1.17 ([4710dcd](https://github.com/digitalocean/gradient-python/commit/4710dcdcc4600546a048e2769abeee056d9383f6))
+* **docs:** use environment variables for authentication in code snippets ([47b051a](https://github.com/digitalocean/gradient-python/commit/47b051af6578df97f84464ae40f04f957a00160a))
+* **internal:** add missing files argument to base client ([8ffa56c](https://github.com/digitalocean/gradient-python/commit/8ffa56c38b3816d5598d83976030e1a8706ec45e))
+* update lockfile ([516734f](https://github.com/digitalocean/gradient-python/commit/516734f2d19eb314061fb27c049a878b8c766313))
+
+## 3.8.0 (2025-11-20)
+
+Full Changelog: [v3.7.0...v3.8.0](https://github.com/digitalocean/gradient-python/compare/v3.7.0...v3.8.0)
+
+### Features
+
+* **api:** manual updates ([244277b](https://github.com/digitalocean/gradient-python/commit/244277b483ac97f733e8f37e0b556cb49813b554))
+
+## 3.7.0 (2025-11-19)
+
+Full Changelog: [v3.6.0...v3.7.0](https://github.com/digitalocean/gradient-python/compare/v3.6.0...v3.7.0)
+
+### Features
+
+* add wait_for_completion method to IndexingJobs resource with sy… ([#49](https://github.com/digitalocean/gradient-python/issues/49)) ([9edc2a6](https://github.com/digitalocean/gradient-python/commit/9edc2a60f5aa49749e151477615bbecb3a79e92b))
+* Add wait_until_ready() method for agent deployment polling ([#56](https://github.com/digitalocean/gradient-python/issues/56)) ([dcef3d5](https://github.com/digitalocean/gradient-python/commit/dcef3d5ebb4ef903c0c91aa4008853bb978f5544))
+* **api:** add inference errors ([d61d495](https://github.com/digitalocean/gradient-python/commit/d61d4955f596d9ac1bebc9387a6573989e823022))
+* **api:** include indexing jobs ([d249d06](https://github.com/digitalocean/gradient-python/commit/d249d0606e26d585eb2b7859948a796ea7860f53))
+
+
+### Bug Fixes
+
+* **client:** close streams without requiring full consumption ([33fe04b](https://github.com/digitalocean/gradient-python/commit/33fe04b2e4ab71094ee13e7b83d4c04867e7d485))
+* compat with Python 3.14 ([add7b21](https://github.com/digitalocean/gradient-python/commit/add7b21b9fbb8987641d5520da638647fe27b159))
+* **compat:** update signatures of `model_dump` and `model_dump_json` for Pydantic v1 ([c945870](https://github.com/digitalocean/gradient-python/commit/c945870a31840d553cb1e3a75314f1c884a56060))
+
+
+### Chores
+
+* bump `httpx-aiohttp` version to 0.1.9 ([db39cc6](https://github.com/digitalocean/gradient-python/commit/db39cc63fb126ac81edfe2cb991493d10a2d0936))
+* **internal/tests:** avoid race condition with implicit client cleanup ([e0202bb](https://github.com/digitalocean/gradient-python/commit/e0202bb915613872095f7f223a49c4480e50be98))
+* **internal:** grammar fix (it's -> its) ([c6ffb3b](https://github.com/digitalocean/gradient-python/commit/c6ffb3becbcb99e36992934fac20d67a6a3b967c))
+* merge issues in test_client.py ([#87](https://github.com/digitalocean/gradient-python/issues/87)) ([62fc025](https://github.com/digitalocean/gradient-python/commit/62fc02512e941c6af18b11c19df8828cca31159d))
+* **package:** drop Python 3.8 support ([825b1e4](https://github.com/digitalocean/gradient-python/commit/825b1e4f8b257fc103c0d45743133bbc81ca3e10))
+
+## 3.6.0 (2025-10-16)
+
+Full Changelog: [v3.5.0...v3.6.0](https://github.com/digitalocean/gradient-python/compare/v3.5.0...v3.6.0)
+
+### Features
+
+* **api:** manual updates ([da88e9e](https://github.com/digitalocean/gradient-python/commit/da88e9eee0adc6152d0d8212305397483be0d686))
+
+
+### Bug Fixes
+
+* lints ([a1b1fc6](https://github.com/digitalocean/gradient-python/commit/a1b1fc6b7747c00d9bfc2b86c6262e9c123416dc))
+* test setup needs all three access keys ([01ac735](https://github.com/digitalocean/gradient-python/commit/01ac735fb965686699df82ec8763b18ceb660972))
+
+## 3.5.0 (2025-10-14)
+
+Full Changelog: [v3.4.0...v3.5.0](https://github.com/digitalocean/gradient-python/compare/v3.4.0...v3.5.0)
+
+### Features
+
+* **api:** update via SDK Studio ([#74](https://github.com/digitalocean/gradient-python/issues/74)) ([e1ab040](https://github.com/digitalocean/gradient-python/commit/e1ab0407e88f5394f5c299940a4b2fe72dbbf70e))
+
+
+### Chores
+
+* **internal:** detect missing future annotations with ruff ([0fb9f92](https://github.com/digitalocean/gradient-python/commit/0fb9f9254a0f72a721fa73823399e58eec723f1a))
+
+## 3.4.0 (2025-10-09)
+
+Full Changelog: [v3.3.0...v3.4.0](https://github.com/digitalocean/gradient-python/compare/v3.3.0...v3.4.0)
+
+### Features
+
+* **api:** manual updates ([bbd7ddc](https://github.com/digitalocean/gradient-python/commit/bbd7ddccfb3d98f39e61948365b92202b3cc9e33))
+
+## 3.3.0 (2025-10-07)
+
+Full Changelog: [v3.2.0...v3.3.0](https://github.com/digitalocean/gradient-python/compare/v3.2.0...v3.3.0)
+
+### Features
+
+* **api:** Images generations - openai ([e5a309e](https://github.com/digitalocean/gradient-python/commit/e5a309e46bf05846c580f425e6fa23f323138a4d))
+* **api:** update via SDK Studio ([c2bf693](https://github.com/digitalocean/gradient-python/commit/c2bf693d233830dafdfc2aa7f74e2ced2e8d81a0))
+
+## 3.2.0 (2025-10-06)
+
+Full Changelog: [v3.1.0...v3.2.0](https://github.com/digitalocean/gradient-python/compare/v3.1.0...v3.2.0)
+
+### Features
+
+* **api:** Images generations ([37bf67a](https://github.com/digitalocean/gradient-python/commit/37bf67af6097a6396e8f96a64d9224312355ff0f))
+
+## 3.1.0 (2025-10-03)
+
+Full Changelog: [v3.0.2...v3.1.0](https://github.com/digitalocean/gradient-python/compare/v3.0.2...v3.1.0)
+
+### Features
+
+* **api:** update via SDK Studio ([20f2512](https://github.com/digitalocean/gradient-python/commit/20f251223fbe35fbe170b07be41fa6fd2656eed7))
+* **api:** update via SDK Studio ([09bf61b](https://github.com/digitalocean/gradient-python/commit/09bf61b5c24b1299a84ea6e8d4df3b88118d9fc3))
+* **api:** update via SDK Studio ([76d29b6](https://github.com/digitalocean/gradient-python/commit/76d29b61ce039f3f270715135ab4d0f444a52b3c))
+* **api:** update via SDK Studio ([fa68fb4](https://github.com/digitalocean/gradient-python/commit/fa68fb43e3e175b3dacd62d459b5d8c38b07e367))
+* **api:** update via SDK Studio ([e23ac14](https://github.com/digitalocean/gradient-python/commit/e23ac14538e17e8d33c33335285389cf13eefe04))
+* **api:** update via SDK Studio ([a5f6aa6](https://github.com/digitalocean/gradient-python/commit/a5f6aa656021a9aaa6a2e82dfa251f87f0096de0))
+* **api:** update via SDK Studio ([b900d76](https://github.com/digitalocean/gradient-python/commit/b900d769ba4a290523f17d2d69de850366c961b6))
+
+
+### Chores
+
+* **client:** support model_access_key in image generations ([4b81c5c](https://github.com/digitalocean/gradient-python/commit/4b81c5cf4998707ca2b4eff25845f687e2002602))
+* **client:** support model_access_key in image generations for real ([c202e81](https://github.com/digitalocean/gradient-python/commit/c202e81d81732217a839a0c7c5e56178252362a1))
+* fix bash quoting ([d92383d](https://github.com/digitalocean/gradient-python/commit/d92383da134a32cb0ae6f5a1c3044ec4947deacc))
+* quote bash variables ([6673263](https://github.com/digitalocean/gradient-python/commit/6673263dbdee2ae77eabd2f6d88cf61921f9e63c))
+* remove preview warning ([e4cf6a8](https://github.com/digitalocean/gradient-python/commit/e4cf6a8b5b37acf483be7301aa0a661a5db43a05))
+* update actions versions ([7056460](https://github.com/digitalocean/gradient-python/commit/7056460cef8093329da4ed24f2e7bd286213e90d))
+
+## 3.0.2 (2025-09-24)
+
+Full Changelog: [v3.0.1...v3.0.2](https://github.com/digitalocean/gradient-python/compare/v3.0.1...v3.0.2)
+
+### Chores
+
+* do not install brew dependencies in ./scripts/bootstrap by default ([d83b77a](https://github.com/digitalocean/gradient-python/commit/d83b77a943d7beb3373eebc543cdc787371753a5))
+* improve example values ([8f3a107](https://github.com/digitalocean/gradient-python/commit/8f3a107935a7ef0aa7e0e93161a24c7ecf24a272))
+* **types:** change optional parameter type from NotGiven to Omit ([78eb019](https://github.com/digitalocean/gradient-python/commit/78eb019c87cc55186abffd92f1d710d0c6ef0895))
+
+## 3.0.1 (2025-09-24)
+
+Full Changelog: [v3.0.0...v3.0.1](https://github.com/digitalocean/gradient-python/compare/v3.0.0...v3.0.1)
+
+### Bug Fixes
+
+* add proto to default inference url ([#52](https://github.com/digitalocean/gradient-python/issues/52)) ([108d7cb](https://github.com/digitalocean/gradient-python/commit/108d7cb79f4d9046136cbc03cf92056575d04f7a))
+
+## 3.0.0 (2025-09-18)
+
+Full Changelog: [v3.0.0-beta.6...v3.0.0](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.6...v3.0.0)
+
+### Chores
+
+* remove deprecated env vars ([#50](https://github.com/digitalocean/gradient-python/issues/50)) ([32292f5](https://github.com/digitalocean/gradient-python/commit/32292f5d7cab21cfaa68577a6f838d134842e3fc))
+* remove old folders ([60545d7](https://github.com/digitalocean/gradient-python/commit/60545d7857d8c78c23fba888cc5eae29330eb521))
+* update author ([695cc57](https://github.com/digitalocean/gradient-python/commit/695cc572e7f506617b1a37ed600f4e485dbe26c0))
+
+
+### Refactors
+
+* **api:** consistently rename user_agent parameter to user_agent_package in Gradient and AsyncGradient classes for clarity ([af7420c](https://github.com/digitalocean/gradient-python/commit/af7420c654bd30af4e30a939e31960ba6414adb7))
+* **api:** rename user_agent parameter to user_agent_package in BaseClient, SyncAPIClient, and AsyncAPIClient for better clarity ([dba36f7](https://github.com/digitalocean/gradient-python/commit/dba36f7bae0b3d28a0013f5d23c482b7be5e238a))
+
+## 3.0.0-beta.6 (2025-09-17)
+
+Full Changelog: [v3.0.0-beta.5...v3.0.0-beta.6](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.5...v3.0.0-beta.6)
+
+### Features
+
+* **api:** enable typescript ([c17086a](https://github.com/digitalocean/gradient-python/commit/c17086aaed18fbb8ba85f050556a193cdc4a233f))
+* improve future compat with pydantic v3 ([300eac0](https://github.com/digitalocean/gradient-python/commit/300eac0417f8f17a65bb871b15de1254f4677558))
+* normalize user agent with other do clients ([85bc8eb](https://github.com/digitalocean/gradient-python/commit/85bc8eb26afdfd7deb28ce2198eb3ef02181b95f))
+* **types:** replace List[str] with SequenceNotStr in params ([5a6aa92](https://github.com/digitalocean/gradient-python/commit/5a6aa9241b5e7c2f4319caa14d62f41c0c824f9e))
+
+
+### Chores
+
+* clean up LICENSING after legal review ([#49](https://github.com/digitalocean/gradient-python/issues/49)) ([7212f62](https://github.com/digitalocean/gradient-python/commit/7212f62b6d3a5bbc7c8422a7fd8f336d22792049))
+* **internal:** move mypy configurations to `pyproject.toml` file ([25c0448](https://github.com/digitalocean/gradient-python/commit/25c044818b636e3307af2fefd2add15a6e650e8d))
+* **internal:** update pydantic dependency ([55255fb](https://github.com/digitalocean/gradient-python/commit/55255fb5d51bca4204f5e741024f4184da465d78))
+* **tests:** simplify `get_platform` test ([b839e4b](https://github.com/digitalocean/gradient-python/commit/b839e4b31c1262157544bd69536051a10d6b098d))
+
+## 3.0.0-beta.5 (2025-09-08)
+
+Full Changelog: [v3.0.0-beta.4...v3.0.0-beta.5](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.4...v3.0.0-beta.5)
+
+### Features
+
+* **api:** manual updates ([044a233](https://github.com/digitalocean/gradient-python/commit/044a2339f9ae89facbed403d8240d1e4cf3e9c1f))
+* **api:** manual updates ([0e8fd1b](https://github.com/digitalocean/gradient-python/commit/0e8fd1b364751ec933cadf02be693afa63a67029))
+
+
+### Bug Fixes
+
+* avoid newer type syntax ([3d5c35c](https://github.com/digitalocean/gradient-python/commit/3d5c35ca11b4c7344308f7fbd7cd98ec44dd65a0))
+
+
+### Chores
+
+* **internal:** add Sequence related utils ([2997cfc](https://github.com/digitalocean/gradient-python/commit/2997cfc25bf46b4cc9faf9f0f22cb4680cadca8b))
+* **internal:** change ci workflow machines ([5f41b3d](https://github.com/digitalocean/gradient-python/commit/5f41b3d956bf1ae25f90b862d5057c16b06e78a3))
+* **internal:** update pyright exclude list ([2a0d1a2](https://github.com/digitalocean/gradient-python/commit/2a0d1a2b174990d6b081ff764b13949b4dfa107f))
+* update github action ([369c5d9](https://github.com/digitalocean/gradient-python/commit/369c5d982cfadfaaaeda9481b2c9249e3f87423d))
+
+## 3.0.0-beta.4 (2025-08-12)
+
+Full Changelog: [v3.0.0-beta.3...v3.0.0-beta.4](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.3...v3.0.0-beta.4)
+
+### Chores
+
+* **internal:** codegen related update ([4757cc5](https://github.com/digitalocean/gradient-python/commit/4757cc594565cf8500b4087205e6eb5fd8c5d5c5))
+* **internal:** update comment in script ([c324412](https://github.com/digitalocean/gradient-python/commit/c32441201c3156cc4fe5b400a4f396eaf19ecaad))
+* update @stainless-api/prism-cli to v5.15.0 ([835aa7c](https://github.com/digitalocean/gradient-python/commit/835aa7c204f5def64cdcd8b863581fd6a1ea37b6))
+
+## 3.0.0-beta.3 (2025-08-08)
+
+Full Changelog: [v3.0.0-beta.2...v3.0.0-beta.3](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.2...v3.0.0-beta.3)
+
+### Features
+
+* **api:** make kwargs match the env vars ([b74952e](https://github.com/digitalocean/gradient-python/commit/b74952e665a92a50937f475ef68331d85d96e018))
+* **api:** rename environment variables ([ed70ab7](https://github.com/digitalocean/gradient-python/commit/ed70ab72ce3faecd7fb5070f429275518b7aa6f2))
+
+
+### Bug Fixes
+
+* actually read env vars ([68daceb](https://github.com/digitalocean/gradient-python/commit/68daceb4cf89b76fbf04e5111cea7541a989afed))
+* **config:** align environment variables with other DO tools and console ([#40](https://github.com/digitalocean/gradient-python/issues/40)) ([#41](https://github.com/digitalocean/gradient-python/issues/41)) ([6853d05](https://github.com/digitalocean/gradient-python/commit/6853d0542055a29a70685cab67414e5612890c7d))
+* use of cached variable in internals ([4bd6ace](https://github.com/digitalocean/gradient-python/commit/4bd6ace92d2dbfe1364c5f5aa8e0bf5899e8fc16))
+
+
+### Chores
+
+* **internal:** fix ruff target version ([b370349](https://github.com/digitalocean/gradient-python/commit/b370349a68d24b00854e3f54df50c86f2c29651b))
+
+## 3.0.0-beta.2 (2025-08-04)
+
+Full Changelog: [v3.0.0-beta.1...v3.0.0-beta.2](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.1...v3.0.0-beta.2)
+
+### Features
+
+* **api:** collected updates 8/4 ([90ff9f2](https://github.com/digitalocean/gradient-python/commit/90ff9f227aa00805deb270e8e1de0ea9b56e3b4e))
+
+## 3.0.0-beta.1 (2025-07-31)
+
+Full Changelog: [v0.1.0-beta.4...v3.0.0-beta.1](https://github.com/digitalocean/gradient-python/compare/v0.1.0-beta.4...v3.0.0-beta.1)
+
+### Features
+
+* **api:** remove GRADIENTAI env vars ([43d5c5a](https://github.com/digitalocean/gradient-python/commit/43d5c5a6f22e108e1727e6abae9199c1ba2481da))
+* **api:** update to package gradient ([9dcd1d6](https://github.com/digitalocean/gradient-python/commit/9dcd1d6c53d31e7da58a7828a0864fc7f633b22c))
+* **api:** update to package gradient ([3099c15](https://github.com/digitalocean/gradient-python/commit/3099c154ab5fc3fd104349ce9069cdd18485104d))
+* **client:** support file upload requests ([90a77c9](https://github.com/digitalocean/gradient-python/commit/90a77c93c1a0b4a565fbb78f37e69ed6709df223))
+
+
+### Chores
+
+* update SDK settings ([b7d59f7](https://github.com/digitalocean/gradient-python/commit/b7d59f71d0d511e2ec9bdbf5e548d5e5bf946832))
+* update SDK settings ([3b18c48](https://github.com/digitalocean/gradient-python/commit/3b18c48f0c5dbb3f70e73b9a2654d820c8f6a882))
+* update SDK settings ([df18f3a](https://github.com/digitalocean/gradient-python/commit/df18f3a44bdc859e78130aa229e7fd0bfc0af906))
+* update SDK settings ([33893b0](https://github.com/digitalocean/gradient-python/commit/33893b0a60acc7746e7a60b5066e332547210c38))
+* whitespace cleanup ([dd13d32](https://github.com/digitalocean/gradient-python/commit/dd13d321f46cf779fcb841c12068216875f551e0))
+
+## 0.1.0-beta.4 (2025-07-29)
+
+Full Changelog: [v0.1.0-beta.3...v0.1.0-beta.4](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-beta.3...v0.1.0-beta.4)
+
+### Features
+
+* **api:** update via SDK Studio ([3018b4c](https://github.com/digitalocean/gradientai-python/commit/3018b4cc758839eda46617170a24f181d9a0b70b))
+* **api:** update via SDK Studio ([4292abf](https://github.com/digitalocean/gradientai-python/commit/4292abf5ba2e89dedf7f7660f6e274e42a163ae0))
+* **api:** update via SDK Studio ([2252d77](https://github.com/digitalocean/gradientai-python/commit/2252d77e753a1407a1b851e01f4dcdbf1d4e0697))
+* **api:** update via SDK Studio ([7d7d879](https://github.com/digitalocean/gradientai-python/commit/7d7d879480a1d85ac8329cb98fa8da8afd8fee12))
+
+## 0.1.0-beta.3 (2025-07-25)
+
+Full Changelog: [v0.1.0-beta.2...v0.1.0-beta.3](https://github.com/digitalocean/gradient-python/compare/v0.1.0-beta.2...v0.1.0-beta.3)
+
+### Bug Fixes
+
+* **parsing:** parse extra field types ([93bea71](https://github.com/digitalocean/gradient-python/commit/93bea71735195fa3f32de6b64bbc0aaac60a6d6c))
+
+
+### Chores
+
+* **project:** add settings file for vscode ([3b597aa](https://github.com/digitalocean/gradient-python/commit/3b597aa96e1f588506de47d782444992383f5522))
+* update README with new gradient name ([03157fb](https://github.com/digitalocean/gradient-python/commit/03157fb38616c68568024ab7e426b45d414bf432))
+
+## 0.1.0-beta.2 (2025-07-22)
+
+Full Changelog: [v0.1.0-beta.1...v0.1.0-beta.2](https://github.com/digitalocean/gradient-python/compare/v0.1.0-beta.1...v0.1.0-beta.2)
+
+### Bug Fixes
+
+* **parsing:** ignore empty metadata ([cee9728](https://github.com/digitalocean/gradient-python/commit/cee9728fd727cd600d2ac47ead9206ca937f7757))
+
+
+### Chores
+
+* **internal:** version bump ([e13ccb0](https://github.com/digitalocean/gradient-python/commit/e13ccb069743fc6ebc56e0bb0463ff11864ad944))
+* **internal:** version bump ([00ee94d](https://github.com/digitalocean/gradient-python/commit/00ee94d848ae5c5fc4604160c822e4757c4e6de8))
+* **types:** rebuild Pydantic models after all types are defined ([db7d61c](https://github.com/digitalocean/gradient-python/commit/db7d61c02df9f86af9170d38539257e9cbf3eff9))
+
+## 0.1.0-beta.1 (2025-07-21)
+
+Full Changelog: [v0.1.0-alpha.19...v0.1.0-beta.1](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.19...v0.1.0-beta.1)
+
+### Features
+
+* **api:** manual updates ([fda6270](https://github.com/digitalocean/gradient-python/commit/fda62708a8f4d4fd66187edd54b39336b88a7e1c))
+* **api:** manual updates ([7548648](https://github.com/digitalocean/gradient-python/commit/75486489df49297376fe0bcff70f1e527764b64d))
+
+
+### Chores
+
+* **internal:** version bump ([be22c3d](https://github.com/digitalocean/gradient-python/commit/be22c3d8c9835b45643d5e91db093108cb03f893))
+* **internal:** version bump ([2774d54](https://github.com/digitalocean/gradient-python/commit/2774d540184f8ca7d401c77eaa69a52f62e8514b))
+* **internal:** version bump ([44abb37](https://github.com/digitalocean/gradient-python/commit/44abb37d897dc77c1fda511b195cc9297fd324ac))
+* **internal:** version bump ([981ba17](https://github.com/digitalocean/gradient-python/commit/981ba17925e46a9f87a141a481645711fbb6bb6e))
+
+## 0.1.0-alpha.19 (2025-07-19)
+
+Full Changelog: [v0.1.0-alpha.18...v0.1.0-alpha.19](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.18...v0.1.0-alpha.19)
+
+### Features
+
+* **api:** manual updates ([2c36a8b](https://github.com/digitalocean/gradient-python/commit/2c36a8be83bb24025adf921c24acba3d666bf25d))
+
+
+### Chores
+
+* **internal:** version bump ([2864090](https://github.com/digitalocean/gradient-python/commit/2864090c0af4858e4bee35aef2113e6983cfdca4))
+
+## 0.1.0-alpha.18 (2025-07-19)
+
+Full Changelog: [v0.1.0-alpha.17...v0.1.0-alpha.18](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.17...v0.1.0-alpha.18)
+
+### Features
+
+* **api:** manual updates ([92d54ed](https://github.com/digitalocean/gradient-python/commit/92d54edfff94931f10fb8dac822764edf6fca6bd))
+* **api:** manual updates ([688982c](https://github.com/digitalocean/gradient-python/commit/688982c143e0ebca62f6ac39c1e074a2fd4083fc))
+
+
+### Chores
+
+* **internal:** version bump ([ecb4bae](https://github.com/digitalocean/gradient-python/commit/ecb4baedce933efc4ae99e0ef47100a02a68c9cd))
+* **internal:** version bump ([feb32ce](https://github.com/digitalocean/gradient-python/commit/feb32ce78b107e9414be87e8c34d8c3274105cb4))
+* update pypi package name ([656dfe0](https://github.com/digitalocean/gradient-python/commit/656dfe01d8e301dd1f93b3fa447434e6a5b41270))
+
+## 0.1.0-alpha.17 (2025-07-19)
+
+Full Changelog: [v0.1.0-alpha.16...v0.1.0-alpha.17](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.16...v0.1.0-alpha.17)
+
+### Chores
+
+* **internal:** version bump ([bc0b77b](https://github.com/digitalocean/gradient-python/commit/bc0b77b663dc5837a2e341b70b1cda31224a6d9d))
+* **internal:** version bump ([503666f](https://github.com/digitalocean/gradient-python/commit/503666fa61c23e584a22273371850f520100984a))
+* **internal:** version bump ([394991e](https://github.com/digitalocean/gradient-python/commit/394991e1f436ac2fa3581a3e1bab02e8a95f94b9))
+* **internal:** version bump ([7ae18a1](https://github.com/digitalocean/gradient-python/commit/7ae18a15cc889c8b0ffe5879824745e964cdd637))
+
+## 0.1.0-alpha.16 (2025-07-18)
+
+Full Changelog: [v0.1.0-alpha.15...v0.1.0-alpha.16](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.15...v0.1.0-alpha.16)
+
+### Chores
+
+* **internal:** version bump ([02f1f68](https://github.com/digitalocean/gradient-python/commit/02f1f686505028155ee2a4cf670794117ce7981a))
+
+## 0.1.0-alpha.15 (2025-07-18)
+
+Full Changelog: [v0.1.0-alpha.14...v0.1.0-alpha.15](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.14...v0.1.0-alpha.15)
+
+### Features
+
+* **api:** add gpu droplets ([b207e9a](https://github.com/digitalocean/gradient-python/commit/b207e9a69ddf821522f5d9e9f10502850220585f))
+* **api:** add gpu droplets ([b9e317b](https://github.com/digitalocean/gradient-python/commit/b9e317bac2c541a7eafcfb59a4b19c81e1145075))
+
+
+### Chores
+
+* format ([d940e66](https://github.com/digitalocean/gradient-python/commit/d940e66107e00f351853c0bc667ca6ed3cf98605))
+* **internal:** version bump ([1a66126](https://github.com/digitalocean/gradient-python/commit/1a661264f68580dff74c3f7d4891ab2661fde190))
+* **internal:** version bump ([9c546a1](https://github.com/digitalocean/gradient-python/commit/9c546a1f97241bb448430e1e43f4e20589e243c1))
+* **internal:** version bump ([8814098](https://github.com/digitalocean/gradient-python/commit/881409847161671b798baf2c89f37ae29e195f29))
+* **internal:** version bump ([bb3ad60](https://github.com/digitalocean/gradient-python/commit/bb3ad60d02fe01b937eaced64682fd66d95a9aec))
+* **internal:** version bump ([2022024](https://github.com/digitalocean/gradient-python/commit/20220246634accf95c4a53df200db5ace7107c55))
+* **internal:** version bump ([52e2c23](https://github.com/digitalocean/gradient-python/commit/52e2c23c23d4dc27c176ebf4783c8fbd86a4c07b))
+* **internal:** version bump ([8ac0f2a](https://github.com/digitalocean/gradient-python/commit/8ac0f2a6d4862907243ba78b132373289e2c3543))
+* **internal:** version bump ([d83fe97](https://github.com/digitalocean/gradient-python/commit/d83fe97aa2f77c84c3c7f4bf40b9fb94c5c28aca))
+* **internal:** version bump ([9d20399](https://github.com/digitalocean/gradient-python/commit/9d2039919e1d9c9e6d153edfb03bccff18b56686))
+* **internal:** version bump ([44a045a](https://github.com/digitalocean/gradient-python/commit/44a045a9c0ce0f0769cce66bc7421a9d81cbc645))
+* **internal:** version bump ([95d1dd2](https://github.com/digitalocean/gradient-python/commit/95d1dd24d290d7d5f23328e4c45c439dca5df748))
+* **internal:** version bump ([7416147](https://github.com/digitalocean/gradient-python/commit/74161477f98e3a76b7227b07d942e1f26a4612b3))
+* **internal:** version bump ([06d7f19](https://github.com/digitalocean/gradient-python/commit/06d7f19cd42a6bc578b39709fe6efed8741a24bc))
+
+## 0.1.0-alpha.14 (2025-07-17)
+
+Full Changelog: [v0.1.0-alpha.13...v0.1.0-alpha.14](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.13...v0.1.0-alpha.14)
+
+### Features
+
+* **api:** update via SDK Studio ([6cdcc6a](https://github.com/digitalocean/gradient-python/commit/6cdcc6a36b9dde2117295ee7bcb9a3bc15571779))
+* **api:** update via SDK Studio ([5475a94](https://github.com/digitalocean/gradient-python/commit/5475a9460676d1c48e99e0d1e75e50de7caecf3a))
+* dynamically build domain for agents.chat.completions.create() ([dee4ef0](https://github.com/digitalocean/gradient-python/commit/dee4ef07ebb3367abc7f05c15271d43ab57e2081))
+* dynamically build domain for agents.chat.completions.create() ([3dbd194](https://github.com/digitalocean/gradient-python/commit/3dbd194643e31907a78ab7e222e95e7508378ada))
+
+
+### Bug Fixes
+
+* add /api prefix for agent routes ([00c62b3](https://github.com/digitalocean/gradient-python/commit/00c62b35f3a29ea8b6e7c96b2e755e6b5199ae55))
+* add /api prefix for agent routes ([72a59db](https://github.com/digitalocean/gradient-python/commit/72a59db98ebeccdf0c4498f6cce37ffe1cb198dd))
+* fix validation for inference_key and agent_key auth ([d27046d](https://github.com/digitalocean/gradient-python/commit/d27046d0c1e8214dd09ab5508e4fcb11fa549dfe))
+
+
+### Chores
+
+* **internal:** version bump ([f3629f1](https://github.com/digitalocean/gradient-python/commit/f3629f169267f240aeb2c4d400606761a649dff7))
+
+## 0.1.0-alpha.13 (2025-07-15)
+
+Full Changelog: [v0.1.0-alpha.12...v0.1.0-alpha.13](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.12...v0.1.0-alpha.13)
+
+### Features
+
+* **api:** manual updates ([bd6fecc](https://github.com/digitalocean/gradient-python/commit/bd6feccf97fa5877085783419f11dad04c57d700))
+* **api:** manual updates ([c2b96ce](https://github.com/digitalocean/gradient-python/commit/c2b96ce3d95cc9b74bffd8d6a499927eefd23b14))
+* **api:** share chat completion chunk model between chat and agent.chat ([d67371f](https://github.com/digitalocean/gradient-python/commit/d67371f9f4d0761ea03097820bc3e77654b4d2bf))
+* clean up environment call outs ([64ee5b4](https://github.com/digitalocean/gradient-python/commit/64ee5b449c0195288d0a1dc55d2725e8cdd6afcf))
+
+
+### Bug Fixes
+
+* **client:** don't send Content-Type header on GET requests ([507a342](https://github.com/digitalocean/gradient-python/commit/507a342fbcc7c801ba36708e56ea2d2a28a1a392))
+* **parsing:** correctly handle nested discriminated unions ([569e473](https://github.com/digitalocean/gradient-python/commit/569e473d422928597ccf762133d5e52ac9a8665a))
+
+
+### Chores
+
+* **internal:** bump pinned h11 dep ([6f4e960](https://github.com/digitalocean/gradient-python/commit/6f4e960b6cb838cbf5e50301375fcb4b60a2cfb3))
+* **internal:** codegen related update ([1df657d](https://github.com/digitalocean/gradient-python/commit/1df657d9b384cb85d27fe839c0dab212a7773f8f))
+* **package:** mark python 3.13 as supported ([1a899b6](https://github.com/digitalocean/gradient-python/commit/1a899b66a484986672a380e405f09b1ae94b6310))
+* **readme:** fix version rendering on pypi ([6fbe83b](https://github.com/digitalocean/gradient-python/commit/6fbe83b11a9e3dbb40cf7f9f627abbbd086ee24a))
+
+## 0.1.0-alpha.12 (2025-07-02)
+
+Full Changelog: [v0.1.0-alpha.11...v0.1.0-alpha.12](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.11...v0.1.0-alpha.12)
+
+### Bug Fixes
+
+* **ci:** correct conditional ([646b4c6](https://github.com/digitalocean/gradient-python/commit/646b4c62044c9bb5211c50e008ef30c777715acb))
+
+
+### Chores
+
+* **ci:** change upload type ([7449413](https://github.com/digitalocean/gradient-python/commit/7449413efc16c58bc484f5f5793aa9cd36c3f405))
+* **internal:** codegen related update ([434929c](https://github.com/digitalocean/gradient-python/commit/434929ce29b314182dec1542a3093c98ca0bb24a))
+
+## 0.1.0-alpha.11 (2025-06-28)
+
+Full Changelog: [v0.1.0-alpha.10...v0.1.0-alpha.11](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.10...v0.1.0-alpha.11)
+
+### Features
+
+* **api:** manual updates ([8d918dc](https://github.com/digitalocean/gradient-python/commit/8d918dcc45f03d799b3aed4e94276086e2d7ea9b))
+
+
+### Chores
+
+* **ci:** only run for pushes and fork pull requests ([adfb5b5](https://github.com/digitalocean/gradient-python/commit/adfb5b51149f667bf9a0b4b4c4c6418e91f843d8))
+* Move model providers ([8d918dc](https://github.com/digitalocean/gradient-python/commit/8d918dcc45f03d799b3aed4e94276086e2d7ea9b))
+
+## 0.1.0-alpha.10 (2025-06-28)
+
+Full Changelog: [v0.1.0-alpha.9...v0.1.0-alpha.10](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.9...v0.1.0-alpha.10)
+
+### Features
+
+* **api:** manual updates ([0e5effc](https://github.com/digitalocean/gradient-python/commit/0e5effc727cebe88ea38f0ec4c3fcb45ffeb4924))
+* **api:** manual updates ([d510ae0](https://github.com/digitalocean/gradient-python/commit/d510ae03f13669af7f47093af06a00609e9b7c07))
+* **api:** manual updates ([c5bc3ca](https://github.com/digitalocean/gradient-python/commit/c5bc3caa477945dc19bbf90661ffeea86370189d))
+
+## 0.1.0-alpha.9 (2025-06-28)
+
+Full Changelog: [v0.1.0-alpha.8...v0.1.0-alpha.9](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.8...v0.1.0-alpha.9)
+
+### Features
+
+* **api:** manual updates ([e0c210a](https://github.com/digitalocean/gradient-python/commit/e0c210a0ffde24bd2c5877689f8ab222288cc597))
+
+## 0.1.0-alpha.8 (2025-06-27)
+
+Full Changelog: [v0.1.0-alpha.7...v0.1.0-alpha.8](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.7...v0.1.0-alpha.8)
+
+### Features
+
+* **client:** setup streaming ([3fd6e57](https://github.com/digitalocean/gradient-python/commit/3fd6e575f6f5952860e42d8c1fa22ccb0b10c623))
+
+## 0.1.0-alpha.7 (2025-06-27)
+
+Full Changelog: [v0.1.0-alpha.6...v0.1.0-alpha.7](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.6...v0.1.0-alpha.7)
+
+### Features
+
+* **api:** manual updates ([63b9ec0](https://github.com/digitalocean/gradient-python/commit/63b9ec02a646dad258afbd048db8db1af8d4401b))
+* **api:** manual updates ([5247aee](https://github.com/digitalocean/gradient-python/commit/5247aee6d6052f6380fbe892d7c2bd9a8d0a32c0))
+* **api:** manual updates ([aa9e2c7](https://github.com/digitalocean/gradient-python/commit/aa9e2c78956162f6195fdbaa1c95754ee4af207e))
+* **client:** add agent_domain option ([b4b6260](https://github.com/digitalocean/gradient-python/commit/b4b62609a12a1dfa0b505e9ec54334b776fb0515))
+
+## 0.1.0-alpha.6 (2025-06-27)
+
+Full Changelog: [v0.1.0-alpha.5...v0.1.0-alpha.6](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.5...v0.1.0-alpha.6)
+
+### Features
+
+* **api:** manual updates ([04eb1be](https://github.com/digitalocean/gradient-python/commit/04eb1be35de7db04e1f0d4e1da8719b54a353bb5))
+
+## 0.1.0-alpha.5 (2025-06-27)
+
+Full Changelog: [v0.1.0-alpha.4...v0.1.0-alpha.5](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.4...v0.1.0-alpha.5)
+
+### Features
+
+* **api:** define api links and meta as shared models ([8d87001](https://github.com/digitalocean/gradient-python/commit/8d87001b51de17dd1a36419c0e926cef119f20b8))
+* **api:** update OpenAI spec and add endpoints/models ([e92c54b](https://github.com/digitalocean/gradient-python/commit/e92c54b05f1025b6173945524724143fdafc7728))
+* **api:** update via SDK Studio ([1ae76f7](https://github.com/digitalocean/gradient-python/commit/1ae76f78ce9e74f8fd555e3497299127e9aa6889))
+* **api:** update via SDK Studio ([98424f4](https://github.com/digitalocean/gradient-python/commit/98424f4a2c7e00138fb5eecf94ca72e2ffcc1212))
+* **api:** update via SDK Studio ([299fd1b](https://github.com/digitalocean/gradient-python/commit/299fd1b29b42f6f2581150e52dcf65fc73270862))
+* **api:** update via SDK Studio ([9a45427](https://github.com/digitalocean/gradient-python/commit/9a45427678644c34afe9792a2561f394718e64ff))
+* **api:** update via SDK Studio ([abe573f](https://github.com/digitalocean/gradient-python/commit/abe573fcc2233c7d71f0a925eea8fa9dd4d0fb91))
+* **api:** update via SDK Studio ([e5ce590](https://github.com/digitalocean/gradient-python/commit/e5ce59057792968892317215078ac2c11e811812))
+* **api:** update via SDK Studio ([1daa3f5](https://github.com/digitalocean/gradient-python/commit/1daa3f55a49b5411d1b378fce30aea3ccbccb6d7))
+* **api:** update via SDK Studio ([1c702b3](https://github.com/digitalocean/gradient-python/commit/1c702b340e4fd723393c0f02df2a87d03ca8c9bb))
+* **api:** update via SDK Studio ([891d6b3](https://github.com/digitalocean/gradient-python/commit/891d6b32e5bdb07d23abf898cec17a60ee64f99d))
+* **api:** update via SDK Studio ([dcbe442](https://github.com/digitalocean/gradient-python/commit/dcbe442efc67554e60b3b28360a4d9f7dcbb313a))
+* use inference key for chat.completions.create() ([5d38e2e](https://github.com/digitalocean/gradient-python/commit/5d38e2eb8604a0a4065d146ba71aa4a5a0e93d85))
+
+
+### Bug Fixes
+
+* **ci:** release-doctor — report correct token name ([4d2b3dc](https://github.com/digitalocean/gradient-python/commit/4d2b3dcefdefc3830d631c5ac27b58778a299983))
+
+
+### Chores
+
+* clean up pyproject ([78637e9](https://github.com/digitalocean/gradient-python/commit/78637e99816d459c27b4f2fd2f6d79c8d32ecfbe))
+* **internal:** codegen related update ([58d7319](https://github.com/digitalocean/gradient-python/commit/58d7319ce68c639c2151a3e96a5d522ec06ff96f))
+
+## 0.1.0-alpha.4 (2025-06-25)
+
+Full Changelog: [v0.1.0-alpha.3...v0.1.0-alpha.4](https://github.com/digitalocean/gradient-python/compare/v0.1.0-alpha.3...v0.1.0-alpha.4)
+
+### Features
+
+* **api:** update via SDK Studio ([d1ea884](https://github.com/digitalocean/gradient-python/commit/d1ea884c9be72b3f8804c5ba91bf4f77a3284a6c))
+* **api:** update via SDK Studio ([584f9f1](https://github.com/digitalocean/gradient-python/commit/584f9f1304b3612eb25f1438041d287592463438))
+* **api:** update via SDK Studio ([7aee6e5](https://github.com/digitalocean/gradient-python/commit/7aee6e55a0574fc1b6ab73a1777c92e4f3a940ea))
+* **api:** update via SDK Studio ([4212f62](https://github.com/digitalocean/gradient-python/commit/4212f62b19c44bcb12c02fe396e8c51dd89d3868))
+* **api:** update via SDK Studio ([b16cceb](https://github.com/digitalocean/gradient-python/commit/b16cceb63edb4253084036b693834bde5da10943))
+* **api:** update via SDK Studio ([34382c0](https://github.com/digitalocean/gradient-python/commit/34382c06c5d61ac97572cb4977d020e1ede9d4ff))
+* **api:** update via SDK Studio ([c33920a](https://github.com/digitalocean/gradient-python/commit/c33920aba0dc1f3b8f4f890ce706c86fd452dd6b))
+* **api:** update via SDK Studio ([359c8d8](https://github.com/digitalocean/gradient-python/commit/359c8d88cec1d60f0beb810b5a0139443d0a3348))
+* **api:** update via SDK Studio ([f27643e](https://github.com/digitalocean/gradient-python/commit/f27643e1e00f606029be919a7117801facc6e5b7))
+* **api:** update via SDK Studio ([e59144c](https://github.com/digitalocean/gradient-python/commit/e59144c2d474a4003fd28b8eded08814ffa8d2f3))
+* **api:** update via SDK Studio ([97e1768](https://github.com/digitalocean/gradient-python/commit/97e17687a348b8ef218c23a06729b6edb1ac5ea9))
+* **api:** update via SDK Studio ([eac41f1](https://github.com/digitalocean/gradient-python/commit/eac41f12912b8d32ffa23d225f4ca56fa5c72505))
+* **api:** update via SDK Studio ([1fa7ebb](https://github.com/digitalocean/gradient-python/commit/1fa7ebb0080db9087b82d29e7197e44dfbb1ebed))
+* **api:** update via SDK Studio ([aa2610a](https://github.com/digitalocean/gradient-python/commit/aa2610afe7da79429e05bff64b4796de7f525681))
+* **api:** update via SDK Studio ([e5c8d76](https://github.com/digitalocean/gradient-python/commit/e5c8d768388b16c06fcc2abee71a53dcc8b3e8c5))
+* **api:** update via SDK Studio ([5f700dc](https://github.com/digitalocean/gradient-python/commit/5f700dc7a4e757015d3bd6f2e82a311114b82d77))
+* **api:** update via SDK Studio ([c042496](https://github.com/digitalocean/gradient-python/commit/c04249614917198b1eb2324438605d99b719a1cf))
+* **api:** update via SDK Studio ([5ebec81](https://github.com/digitalocean/gradient-python/commit/5ebec81604a206eba5e75a7e8990bd7711ba8f47))
+* **api:** update via SDK Studio ([cac54a8](https://github.com/digitalocean/gradient-python/commit/cac54a81a3f22d34b2de0ebfac3c68a982178cad))
+* **api:** update via SDK Studio ([6d62ab0](https://github.com/digitalocean/gradient-python/commit/6d62ab00594d70df0458a0a401f866af15a9298e))
+* **api:** update via SDK Studio ([0ccc62c](https://github.com/digitalocean/gradient-python/commit/0ccc62cb8ef387e0aaf6784db25d5f99a587e5da))
+* **api:** update via SDK Studio ([e75adfb](https://github.com/digitalocean/gradient-python/commit/e75adfbd2d035e57ae110a1d78ea40fb116975e5))
+* **api:** update via SDK Studio ([8bd264b](https://github.com/digitalocean/gradient-python/commit/8bd264b4b4686ca078bf4eb4b5462f058406df3e))
+* **api:** update via SDK Studio ([6254ccf](https://github.com/digitalocean/gradient-python/commit/6254ccf45cbe50ca8191c7149824964f5d00d82f))
+* **api:** update via SDK Studio ([8f5761b](https://github.com/digitalocean/gradient-python/commit/8f5761b1d18fb48ad7488e6f0ad771c077eb7961))
+* **api:** update via SDK Studio ([f853616](https://github.com/digitalocean/gradient-python/commit/f8536166320d1d5bacf1d10a5edb2f71691dde8b))
+* **client:** add support for aiohttp ([494afde](https://github.com/digitalocean/gradient-python/commit/494afde754f735d1ba95011fc83d23d2410fcfdd))
+
+
+### Bug Fixes
+
+* **client:** correctly parse binary response | stream ([abba5be](https://github.com/digitalocean/gradient-python/commit/abba5be958d03a7e5ce7d1cbf8069c0bcf52ee20))
+* **tests:** fix: tests which call HTTP endpoints directly with the example parameters ([e649dcb](https://github.com/digitalocean/gradient-python/commit/e649dcb0f9416e9bf568cc9f3480d7e222052391))
+
+
+### Chores
+
+* **ci:** enable for pull requests ([b6b3f9e](https://github.com/digitalocean/gradient-python/commit/b6b3f9ea85918cfc6fc7304b2d21c340d82a0083))
+* **internal:** codegen related update ([4126872](https://github.com/digitalocean/gradient-python/commit/41268721eafd33fcca5688ca5dff7401f25bdeb2))
+* **internal:** codegen related update ([10b79fb](https://github.com/digitalocean/gradient-python/commit/10b79fb1d51bcff6ed0d18e5ccd18fd1cd75af9f))
+* **internal:** update conftest.py ([12e2103](https://github.com/digitalocean/gradient-python/commit/12e210389204ff74f504e1ec3aa5ba99f1b4971c))
+* **readme:** update badges ([6e40dc3](https://github.com/digitalocean/gradient-python/commit/6e40dc3fa4e33082be7b0bbf65d07e9ae9ac6370))
+* **tests:** add tests for httpx client instantiation & proxies ([7ecf66c](https://github.com/digitalocean/gradient-python/commit/7ecf66c58a124c153a32055967beacbd1a3bbcf3))
+* **tests:** run tests in parallel ([861dd6b](https://github.com/digitalocean/gradient-python/commit/861dd6b75956f2c12814ad32b05624d8d8537d52))
+* **tests:** skip some failing tests on the latest python versions ([75b4539](https://github.com/digitalocean/gradient-python/commit/75b45398c18e75be3389be20479f54521c2e474a))
+* update SDK settings ([ed595b0](https://github.com/digitalocean/gradient-python/commit/ed595b0a23df125ffba733d7339e771997c3f149))
+
+
+### Documentation
+
+* **client:** fix httpx.Timeout documentation reference ([5d452d7](https://github.com/digitalocean/gradient-python/commit/5d452d7245af6c80f47f8395f1c03493dfb53a52))
+
+## 0.1.0-alpha.3 (2025-06-12)
+
+Full Changelog: [v0.1.0-alpha.2...v0.1.0-alpha.3](https://github.com/digitalocean/genai-python/compare/v0.1.0-alpha.2...v0.1.0-alpha.3)
+
+### Chores
+
+* update SDK settings ([502bb34](https://github.com/digitalocean/genai-python/commit/502bb34e1693603cd572c756e8ce6aeba63d1283))
+
+## 0.1.0-alpha.2 (2025-06-12)
+
+Full Changelog: [v0.1.0-alpha.1...v0.1.0-alpha.2](https://github.com/digitalocean/genai-python/compare/v0.1.0-alpha.1...v0.1.0-alpha.2)
+
+### Chores
+
+* update SDK settings ([5b3b94b](https://github.com/digitalocean/genai-python/commit/5b3b94b57a4ba7837093617aafc2ce2d21ac87f1))
+
+## 0.1.0-alpha.1 (2025-06-12)
+
+Full Changelog: [v0.0.1-alpha.0...v0.1.0-alpha.1](https://github.com/digitalocean/genai-python/compare/v0.0.1-alpha.0...v0.1.0-alpha.1)
+
+### Features
+
+* **api:** update via SDK Studio ([1e202d0](https://github.com/digitalocean/genai-python/commit/1e202d01e3582ef5284380417d9f7e195bbc8a39))
+* **api:** update via SDK Studio ([e6103ad](https://github.com/digitalocean/genai-python/commit/e6103ad8134752e632cf1dae9cb09edf10fd7739))
+* **api:** update via SDK Studio ([bf61629](https://github.com/digitalocean/genai-python/commit/bf61629f25376f1cc32b910fbaea9feccfef9884))
+* **api:** update via SDK Studio ([c680ef3](https://github.com/digitalocean/genai-python/commit/c680ef3bac9874ef595edde2bd8f0ce5948ac6c4))
+* **api:** update via SDK Studio ([a4bb08b](https://github.com/digitalocean/genai-python/commit/a4bb08ba4829b5780511b78538e5cbbc276f1965))
+* **api:** update via SDK Studio ([691923d](https://github.com/digitalocean/genai-python/commit/691923d9f60b5ebe5dc34c8227273d06448945e8))
+* **client:** add follow_redirects request option ([5a6d480](https://github.com/digitalocean/genai-python/commit/5a6d480aef6d4c5084f484d1b69e6f49568a8caf))
+
+
+### Chores
+
+* **docs:** remove reference to rye shell ([29febe9](https://github.com/digitalocean/genai-python/commit/29febe9affcb0ae41ec69f8aea3ae6ef53967537))
+* **docs:** remove unnecessary param examples ([35ec489](https://github.com/digitalocean/genai-python/commit/35ec48915a8bd750060634208e91bd98c905b53c))
+* update SDK settings ([a095281](https://github.com/digitalocean/genai-python/commit/a095281b52c7ac5f096147e67b7b2e5bf342f95e))
+* update SDK settings ([d2c39ec](https://github.com/digitalocean/genai-python/commit/d2c39eceea1aaeaf0e6c2707af10c3998d222bda))
+* update SDK settings ([f032621](https://github.com/digitalocean/genai-python/commit/f03262136aa46e9325ac2fae785bf48a56f0127b))
+* update SDK settings ([b2cf700](https://github.com/digitalocean/genai-python/commit/b2cf700a0419f7d6e3f23ee02747fe7766a05f98))
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 79f5523c..670e32c7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -17,8 +17,7 @@ $ rye sync --all-features
You can then run scripts using `rye run python script.py` or by activating the virtual environment:
```sh
-$ rye shell
-# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work
+# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work
$ source .venv/bin/activate
# now you can omit the `rye run` prefix
@@ -37,7 +36,7 @@ $ pip install -r requirements-dev.lock
Most of the SDK is generated code. Modifications to code will be persisted between generations, but may
result in merge conflicts between manual patches and changes from the generator. The generator will never
-modify the contents of the `src/digitalocean_genai_sdk/lib/` and `examples/` directories.
+modify the contents of the `src/gradient/lib/` and `examples/` directories.
## Adding and running examples
@@ -63,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g
To install via git:
```sh
-$ pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git
+$ pip install git+ssh://git@github.com/digitalocean/gradient-python.git
```
Alternatively, you can build from source and install the wheel file:
@@ -86,17 +85,45 @@ $ pip install ./path-to-wheel-file.whl
## Running tests
-Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
-
```sh
-# you will need npm installed
-$ npx prism mock path/to/your/openapi.yml
+$ ./scripts/test
```
-```sh
-$ ./scripts/test
+## Smoke tests & environment variables
+
+The repository includes a small set of live "smoke" tests (see the `smoke` pytest marker) that exercise real Gradient API endpoints. These are excluded from the default test run and only executed when you explicitly target them (`pytest -m smoke`) or in CI via the dedicated `smoke` job.
+
+Required environment variables for smoke tests (all must be set):
+
+| Variable | Purpose |
+|----------|---------|
+| `DIGITALOCEAN_ACCESS_TOKEN` | Access token for core DigitalOcean Gradient API operations (e.g. listing agents). |
+| `GRADIENT_MODEL_ACCESS_KEY` | Key used for serverless inference (chat completions, etc.). |
+| `GRADIENT_AGENT_ACCESS_KEY` | Key used for agent-scoped inference requests. |
+| `GRADIENT_AGENT_ENDPOINT` | Fully-qualified HTTPS endpoint for your deployed agent (e.g. `https://my-agent.agents.do-ai.run`). |
+
+Optional override:
+
+| Variable | Purpose |
+|----------|---------|
+| `GRADIENT_INFERENCE_ENDPOINT` | Override default inference endpoint (`https://inference.do-ai.run`). |
+
+Create a local `.env` file (never commit real secrets). A template is provided at `.env.example`.
+
+Key design notes:
+* Sync & async suites each have a single central test that asserts environment presence and client auto-loaded properties.
+* Other smoke tests intentionally avoid repeating environment / property assertions to keep noise low.
+* Add new credentials by updating the `REQUIRED_ENV_VARS` tuple in both smoke test files and documenting them here and in the README.
+
+Run smoke tests locally:
+
+```bash
+./scripts/smoke # convenience wrapper
+pytest -m smoke -q # direct invocation
```
+Do NOT run smoke tests with production credentials unless you understand the API calls performed—they make real network requests.
+
## Linting and formatting
This repository uses [ruff](https://github.com/astral-sh/ruff) and
@@ -121,7 +148,7 @@ the changes aren't made through the automated pipeline, you may want to make rel
### Publish with a GitHub workflow
-You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up.
+You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradient-python/actions/workflows/publish-pypi.yml). This requires an organization or repository secret to be set up.
### Publish manually
diff --git a/LICENSE b/LICENSE
index 9c99266b..5ab1db50 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,7 +1,201 @@
-Copyright 2025 digitalocean-genai-sdk
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+ 1. Definitions.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2025 DigitalOcean, LLC
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index b9fcd7e8..c69fdd1b 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,12 @@
-# Digitalocean Genai SDK Python API library
+
-[](https://pypi.org/project/digitalocean_genai_sdk/)
+# Gradient Python API library
-The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+
+
+[)](https://pypi.org/project/gradient/)
+[](https://gradientai.digitalocean.com/getting-started/overview/)
+
+The Gradient Python library provides convenient access to the Gradient REST API from any Python 3.9+
application. The library includes type definitions for all request params and response fields,
and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
@@ -10,60 +14,111 @@ It is generated with [Stainless](https://www.stainless.com/).
## Documentation
-The REST API documentation can be found on [help.openai.com](https://help.openai.com/). The full API of this library can be found in [api.md](api.md).
+The getting started guide can be found on [gradient-sdk.digitalocean.com](https://gradient-sdk.digitalocean.com/getting-started/overview).
+The REST API documentation can be found on [developers.digitalocean.com](https://developers.digitalocean.com/documentation/v2/).
+The full API of this library can be found in [api.md](api.md).
## Installation
```sh
-# install from this staging repo
-pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git
+# install from PyPI
+pip install gradient
```
-> [!NOTE]
-> Once this package is [published to PyPI](https://app.stainless.com/docs/guides/publish), this will become: `pip install --pre digitalocean_genai_sdk`
-
## Usage
+The Gradient SDK provides clients for:
+* DigitalOcean API
+* Gradient Serverless Inference
+* Gradient Agent Inference
+
The full API of this library can be found in [api.md](api.md).
```python
import os
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+from gradient import Gradient
-client = DigitaloceanGenaiSDK(
- api_key=os.environ.get(
- "DIGITALOCEAN_GENAI_SDK_API_KEY"
+client = Gradient(
+ access_token=os.environ.get(
+ "DIGITALOCEAN_ACCESS_TOKEN"
+ ), # This is the default and can be omitted
+)
+inference_client = Gradient(
+ model_access_key=os.environ.get(
+ "GRADIENT_MODEL_ACCESS_KEY"
), # This is the default and can be omitted
)
+agent_client = Gradient(
+ agent_access_key=os.environ.get(
+ "GRADIENT_AGENT_ACCESS_KEY"
+ ), # This is the default and can be omitted
+ agent_endpoint="https://my-agent.agents.do-ai.run",
+)
-assistants = client.assistants.list()
-print(assistants.first_id)
+## API
+api_response = client.agents.list()
+print("--- API")
+if api_response.agents:
+ print(api_response.agents[0].name)
+
+
+## Serverless Inference
+inference_response = inference_client.chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+)
+
+print("--- Serverless Inference")
+print(inference_response.choices[0].message.content)
+
+## Agent Inference
+agent_response = agent_client.agents.chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of Portugal?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+)
+
+print("--- Agent Inference")
+print(agent_response.choices[0].message.content)
```
-While you can provide an `api_key` keyword argument,
+While you can provide `access_token` and `model_access_key` keyword arguments,
we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
-to add `DIGITALOCEAN_GENAI_SDK_API_KEY="My API Key"` to your `.env` file
-so that your API Key is not stored in source control.
+to add `DIGITALOCEAN_ACCESS_TOKEN="My Access Token"`, `GRADIENT_MODEL_ACCESS_KEY="My Model Access Key"` to your `.env` file
+so that your keys are not stored in source control.
## Async usage
-Simply import `AsyncDigitaloceanGenaiSDK` instead of `DigitaloceanGenaiSDK` and use `await` with each API call:
+Simply import `AsyncGradient` instead of `Gradient` and use `await` with each API call:
```python
import os
import asyncio
-from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK
+from gradient import AsyncGradient
-client = AsyncDigitaloceanGenaiSDK(
- api_key=os.environ.get(
- "DIGITALOCEAN_GENAI_SDK_API_KEY"
- ), # This is the default and can be omitted
-)
+client = AsyncGradient()
async def main() -> None:
- assistants = await client.assistants.list()
- print(assistants.first_id)
+ completion = await client.chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+ )
+ print(completion.choices)
asyncio.run(main())
@@ -71,84 +126,154 @@ asyncio.run(main())
Functionality between the synchronous and asynchronous clients is otherwise identical.
-## Using types
-
-Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
+### With aiohttp
-- Serializing back into JSON, `model.to_json()`
-- Converting to a dictionary, `model.to_dict()`
+By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend.
-Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
+You can enable this by installing `aiohttp`:
-## Nested params
+```sh
+# install from PyPI
+pip install gradient[aiohttp]
+```
-Nested parameters are dictionaries, typed using `TypedDict`, for example:
+Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
```python
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+import os
+import asyncio
+from gradient import DefaultAioHttpClient
+from gradient import AsyncGradient
-client = DigitaloceanGenaiSDK()
-assistant_object = client.assistants.create(
- model="gpt-4o",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
+async def main() -> None:
+ async with AsyncGradient(
+ model_access_key=os.environ.get(
+ "GRADIENT_MODEL_ACCESS_KEY"
+ ), # This is the default and can be omitted
+ http_client=DefaultAioHttpClient(),
+ ) as client:
+ completion = await client.chat.completions.create(
+ messages=[
{
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
+ "role": "user",
+ "content": "What is the capital of France?",
}
],
- },
- },
-)
-print(assistant_object.tool_resources)
+ model="llama3.3-70b-instruct",
+ )
+ print(completion.choices)
+
+
+asyncio.run(main())
```
-## File uploads
+## Streaming responses
-Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
+We provide support for streaming responses using Server Side Events (SSE).
```python
-from pathlib import Path
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+from gradient import Gradient
+
+client = Gradient()
+
+stream = client.chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+ stream=True,
+)
+for completion in stream:
+ print(completion.choices)
+```
-client = DigitaloceanGenaiSDK()
+The async client uses the exact same interface.
-client.audio.transcribe_audio(
- file=Path("/path/to/file"),
- model="gpt-4o-transcribe",
+```python
+from gradient import AsyncGradient
+
+client = AsyncGradient()
+
+stream = await client.chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+ stream=True,
)
+async for completion in stream:
+ print(completion.choices)
```
-The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
+## Using types
+
+Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
+
+- Serializing back into JSON, `model.to_json()`
+- Converting to a dictionary, `model.to_dict()`
+
+Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
+
+## Nested params
+
+Nested parameters are dictionaries, typed using `TypedDict`, for example:
+
+```python
+from gradient import Gradient
+
+client = Gradient()
+
+completion = client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream_options={},
+)
+print(completion.stream_options)
+```
## Handling errors
-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `digitalocean_genai_sdk.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradient.APIConnectionError` is raised.
When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `digitalocean_genai_sdk.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `gradient.APIStatusError` is raised, containing `status_code` and `response` properties.
-All errors inherit from `digitalocean_genai_sdk.APIError`.
+All errors inherit from `gradient.APIError`.
```python
-import digitalocean_genai_sdk
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+import gradient
+from gradient import Gradient
-client = DigitaloceanGenaiSDK()
+client = Gradient()
try:
- client.assistants.list()
-except digitalocean_genai_sdk.APIConnectionError as e:
+ client.chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+ )
+except gradient.APIConnectionError as e:
print("The server could not be reached")
print(e.__cause__) # an underlying Exception, likely raised within httpx.
-except digitalocean_genai_sdk.RateLimitError as e:
+except gradient.RateLimitError as e:
print("A 429 status code was received; we should back off a bit.")
-except digitalocean_genai_sdk.APIStatusError as e:
+except gradient.APIStatusError as e:
print("Another non-200-range status code was received")
print(e.status_code)
print(e.response)
@@ -176,39 +301,55 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
You can use the `max_retries` option to configure or disable retry settings:
```python
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+from gradient import Gradient
# Configure the default for all requests:
-client = DigitaloceanGenaiSDK(
+client = Gradient(
# default is 2
max_retries=0,
)
# Or, configure per-request:
-client.with_options(max_retries=5).assistants.list()
+client.with_options(max_retries=5).chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+)
```
### Timeouts
By default requests time out after 1 minute. You can configure this with a `timeout` option,
-which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
+which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
```python
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+from gradient import Gradient
# Configure the default for all requests:
-client = DigitaloceanGenaiSDK(
+client = Gradient(
# 20 seconds (default is 1 minute)
timeout=20.0,
)
# More granular control:
-client = DigitaloceanGenaiSDK(
+client = Gradient(
timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)
# Override per-request:
-client.with_options(timeout=5.0).assistants.list()
+client.with_options(timeout=5.0).chat.completions.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+)
```
On timeout, an `APITimeoutError` is thrown.
@@ -221,10 +362,10 @@ Note that requests that time out are [retried twice by default](#retries).
We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
-You can enable logging by setting the environment variable `DIGITALOCEAN_GENAI_SDK_LOG` to `info`.
+You can enable logging by setting the environment variable `GRADIENT_LOG` to `info`.
```shell
-$ export DIGITALOCEAN_GENAI_SDK_LOG=info
+$ export GRADIENT_LOG=info
```
Or to `debug` for more verbose logging.
@@ -246,19 +387,25 @@ if response.my_field is None:
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
```py
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
-
-client = DigitaloceanGenaiSDK()
-response = client.assistants.with_raw_response.list()
+from gradient import Gradient
+
+client = Gradient()
+response = client.chat.completions.with_raw_response.create(
+ messages=[{
+ "role": "user",
+ "content": "What is the capital of France?",
+ }],
+ model="llama3.3-70b-instruct",
+)
print(response.headers.get('X-My-Header'))
-assistant = response.parse() # get the object that `assistants.list()` would have returned
-print(assistant.first_id)
+completion = response.parse() # get the object that `chat.completions.create()` would have returned
+print(completion.choices)
```
-These methods return an [`APIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradient-python/tree/main/src/gradient/_response.py) object.
-The async client returns an [`AsyncAPIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradient-python/tree/main/src/gradient/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
#### `.with_streaming_response`
@@ -267,7 +414,15 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
```python
-with client.assistants.with_streaming_response.list() as response:
+with client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "role": "user",
+ "content": "What is the capital of France?",
+ }
+ ],
+ model="llama3.3-70b-instruct",
+) as response:
print(response.headers.get("X-My-Header"))
for line in response.iter_lines():
@@ -320,10 +475,10 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c
```python
import httpx
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, DefaultHttpxClient
+from gradient import Gradient, DefaultHttpxClient
-client = DigitaloceanGenaiSDK(
- # Or use the `DIGITALOCEAN_GENAI_SDK_BASE_URL` env var
+client = Gradient(
+ # Or use the `GRADIENT_BASE_URL` env var
base_url="http://my.test.server.example.com:8083",
http_client=DefaultHttpxClient(
proxy="http://my.test.proxy.example.com",
@@ -343,9 +498,9 @@ client.with_options(http_client=DefaultHttpxClient(...))
By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
```py
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+from gradient import Gradient
-with DigitaloceanGenaiSDK() as client:
+with Gradient() as client:
# make requests here
...
@@ -362,7 +517,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
-We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/issues) with questions, bugs, or suggestions.
+We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradient-python/issues) with questions, bugs, or suggestions.
### Determining the installed version
@@ -371,14 +526,76 @@ If you've upgraded to the latest version but aren't seeing any new features you
You can determine the version that is being used at runtime with:
```py
-import digitalocean_genai_sdk
-print(digitalocean_genai_sdk.__version__)
+import gradient
+print(gradient.__version__)
```
## Requirements
-Python 3.8 or higher.
+Python 3.9 or higher.
## Contributing
See [the contributing documentation](./CONTRIBUTING.md).
+
+## Smoke tests
+
+The repository includes a small set of "smoke" tests that exercise live Gradient API / Inference / Agent endpoints to catch integration regressions early. These tests are intentionally excluded from the standard test run (they are marked with the `smoke` pytest marker) and only run in CI via the dedicated `smoke` job, or when you explicitly target them locally.
+
+### Required environment variables
+
+All of the following environment variables must be set for the smoke tests (both sync & async) to run. If any are missing the smoke tests will fail fast:
+
+| Variable | Purpose |
+|----------|---------|
+| `DIGITALOCEAN_ACCESS_TOKEN` | Access token for core DigitalOcean Gradient API operations (e.g. listing agents). |
+| `GRADIENT_MODEL_ACCESS_KEY` | Key used for serverless inference (chat completions, etc.). |
+| `GRADIENT_AGENT_ACCESS_KEY` | Key used for agent-scoped inference requests. |
+| `GRADIENT_AGENT_ENDPOINT` | Fully-qualified HTTPS endpoint for your deployed agent (e.g. `https://my-agent.agents.do-ai.run`). |
+
+> Optional override: `GRADIENT_INFERENCE_ENDPOINT` can be provided to point inference to a non-default endpoint (defaults to `https://inference.do-ai.run`).
+
+### Running smoke tests locally
+
+1. Export the required environment variables (or place them in a `.env` file and use a tool like `direnv` or `python-dotenv`).
+2. Run only the smoke tests:
+
+```bash
+rye run pytest -m smoke -q
+```
+
+To include them alongside the regular suite:
+
+```bash
+./scripts/test -m smoke
+```
+
+Convenience wrapper (auto-loads a local `.env` if present):
+
+```bash
+./scripts/smoke
+```
+
+See `.env.example` for a template of required variables you can copy into a `.env` file (do not commit secrets).
+
+### Async variants
+
+Each smoke test has an async counterpart in `tests/test_smoke_sdk_async.py`. Both sets are covered automatically by the `-m smoke` selection.
+
+### CI behavior
+
+The default `test` job excludes smoke tests (`-m 'not smoke'`). A separate `smoke` job runs on pushes to the main repository with the required secrets injected. This keeps contributors from inadvertently hitting live services while still providing integration coverage in controlled environments.
+
+### Adding new smoke tests
+
+1. Add a new test function to `tests/test_smoke_sdk.py` and/or `tests/test_smoke_sdk_async.py`.
+2. Mark it with `@pytest.mark.smoke`.
+3. Avoid duplicating environment or client property assertions—those live in the central environment/client state test (sync & async).
+4. Keep assertions minimal—verify only surface contract / structure; deeper behavior belongs in unit tests with mocks.
+
+If a new credential is required, update this README section, the `REQUIRED_ENV_VARS` list in both smoke test files, and the CI workflow's `smoke` job environment.
+
+
+## License
+
+Licensed under the Apache License 2.0. See [LICENSE](./LICENSE).
diff --git a/SECURITY.md b/SECURITY.md
index d08f7996..fe1c055c 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -16,7 +16,7 @@ before making any information public.
## Reporting Non-SDK Related Security Issues
If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by Digitalocean Genai SDK, please follow the respective company's security reporting guidelines.
+or products provided by Gradient, please follow the respective company's security reporting guidelines.
---
diff --git a/api.md b/api.md
index daea5075..45e4eaeb 100644
--- a/api.md
+++ b/api.md
@@ -1,627 +1,1077 @@
-# Assistants
+# Shared Types
+
+```python
+from gradient.types import (
+ Action,
+ ActionLink,
+ APILinks,
+ APIMeta,
+ BackwardLinks,
+ ChatCompletionChunk,
+ ChatCompletionTokenLogprob,
+ CompletionUsage,
+ CreateResponseResponse,
+ CreateResponseStreamResponse,
+ DiskInfo,
+ Droplet,
+ DropletNextBackupWindow,
+ FirewallRuleTarget,
+ ForwardLinks,
+ GarbageCollection,
+ GPUInfo,
+ Image,
+ ImageGenCompletedEvent,
+ ImageGenPartialImageEvent,
+ ImageGenStreamEvent,
+ Kernel,
+ MetaProperties,
+ NetworkV4,
+ NetworkV6,
+ PageLinks,
+ Region,
+ Size,
+ Snapshots,
+ Subscription,
+ SubscriptionTierBase,
+ VpcPeering,
+)
+```
+
+# Agents
Types:
```python
-from digitalocean_genai_sdk.types import (
- AssistantObject,
- AssistantSupportedModels,
- AssistantToolsCode,
- AssistantToolsFileSearch,
- AssistantToolsFunction,
- AssistantsAPIResponseFormatOption,
- FileSearchRanker,
- FunctionObject,
- ReasoningEffort,
- AssistantListResponse,
- AssistantDeleteResponse,
+from gradient.types import (
+ APIAgent,
+ APIAgentAPIKeyInfo,
+ APIAgentModel,
+ APIAnthropicAPIKeyInfo,
+ APIDeploymentVisibility,
+ APIOpenAIAPIKeyInfo,
+ APIRetrievalMethod,
+ APIWorkspace,
+ AgentCreateResponse,
+ AgentRetrieveResponse,
+ AgentUpdateResponse,
+ AgentListResponse,
+ AgentDeleteResponse,
+ AgentRetrieveUsageResponse,
+ AgentUpdateStatusResponse,
)
```
Methods:
-- client.assistants.create(\*\*params) -> AssistantObject
-- client.assistants.retrieve(assistant_id) -> AssistantObject
-- client.assistants.update(assistant_id, \*\*params) -> AssistantObject
-- client.assistants.list(\*\*params) -> AssistantListResponse
-- client.assistants.delete(assistant_id) -> AssistantDeleteResponse
+- client.agents.create(\*\*params) -> AgentCreateResponse
+- client.agents.retrieve(uuid) -> AgentRetrieveResponse
+- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(uuid) -> AgentDeleteResponse
+- client.agents.retrieve_usage(uuid, \*\*params) -> AgentRetrieveUsageResponse
+- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
-# Audio
+## APIKeys
Types:
```python
-from digitalocean_genai_sdk.types import (
- TranscriptionSegment,
- VoiceIDsShared,
- AudioTranscribeAudioResponse,
- AudioTranslateAudioResponse,
+from gradient.types.agents import (
+ APIKeyCreateResponse,
+ APIKeyUpdateResponse,
+ APIKeyListResponse,
+ APIKeyDeleteResponse,
+ APIKeyRegenerateResponse,
)
```
Methods:
-- client.audio.generate_speech(\*\*params) -> BinaryAPIResponse
-- client.audio.transcribe_audio(\*\*params) -> AudioTranscribeAudioResponse
-- client.audio.translate_audio(\*\*params) -> AudioTranslateAudioResponse
+- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
-# Batches
+## Chat
+
+### Completions
Types:
```python
-from digitalocean_genai_sdk.types import Batch, BatchListResponse
+from gradient.types.agents.chat import CompletionCreateResponse
```
Methods:
-- client.batches.create(\*\*params) -> Batch
-- client.batches.retrieve(batch_id) -> Batch
-- client.batches.list(\*\*params) -> BatchListResponse
-- client.batches.cancel(batch_id) -> Batch
+- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
-# Chat
+## EvaluationMetrics
-## Completions
+Types:
+
+```python
+from gradient.types.agents import EvaluationMetricListResponse, EvaluationMetricListRegionsResponse
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse
+
+### Workspaces
+
+Types:
+
+```python
+from gradient.types.agents.evaluation_metrics import (
+ WorkspaceCreateResponse,
+ WorkspaceRetrieveResponse,
+ WorkspaceUpdateResponse,
+ WorkspaceListResponse,
+ WorkspaceDeleteResponse,
+ WorkspaceListEvaluationTestCasesResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
+- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
+- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
+- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
+- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
+- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
+
+#### Agents
Types:
```python
-from digitalocean_genai_sdk.types.chat import (
- CreateModelProperties,
- CreateResponse,
- MessageToolCall,
- ModelIDsShared,
- RequestMessageContentPartText,
- ResponseFormatJsonObject,
- ResponseFormatJsonSchema,
- ResponseFormatText,
- ResponseMessage,
- TokenLogprob,
- Usage,
- WebSearchContextSize,
- WebSearchLocation,
- CompletionListResponse,
- CompletionDeleteResponse,
- CompletionListMessagesResponse,
+from gradient.types.agents.evaluation_metrics.workspaces import AgentListResponse, AgentMoveResponse
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
+- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
+
+### Anthropic
+
+#### Keys
+
+Types:
+
+```python
+from gradient.types.agents.evaluation_metrics.anthropic import (
+ KeyCreateResponse,
+ KeyRetrieveResponse,
+ KeyUpdateResponse,
+ KeyListResponse,
+ KeyDeleteResponse,
+ KeyListAgentsResponse,
)
```
Methods:
-- client.chat.completions.create(\*\*params) -> CreateResponse
-- client.chat.completions.retrieve(completion_id) -> CreateResponse
-- client.chat.completions.update(completion_id, \*\*params) -> CreateResponse
-- client.chat.completions.list(\*\*params) -> CompletionListResponse
-- client.chat.completions.delete(completion_id) -> CompletionDeleteResponse
-- client.chat.completions.list_messages(completion_id, \*\*params) -> CompletionListMessagesResponse
+- client.agents.evaluation_metrics.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.agents.evaluation_metrics.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.agents.evaluation_metrics.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.agents.evaluation_metrics.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.agents.evaluation_metrics.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.agents.evaluation_metrics.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+
+### OpenAI
-# Completions
+#### Keys
Types:
```python
-from digitalocean_genai_sdk.types import (
- ChatCompletionStreamOptions,
- StopConfiguration,
- CompletionCreateResponse,
+from gradient.types.agents.evaluation_metrics.openai import (
+ KeyCreateResponse,
+ KeyRetrieveResponse,
+ KeyUpdateResponse,
+ KeyListResponse,
+ KeyDeleteResponse,
+ KeyListAgentsResponse,
)
```
Methods:
-- client.completions.create(\*\*params) -> CompletionCreateResponse
+- client.agents.evaluation_metrics.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.agents.evaluation_metrics.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.agents.evaluation_metrics.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.agents.evaluation_metrics.openai.keys.list(\*\*params) -> KeyListResponse
+- client.agents.evaluation_metrics.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.agents.evaluation_metrics.openai.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+
+### Oauth2
+
+Types:
+
+```python
+from gradient.types.agents.evaluation_metrics import Oauth2GenerateURLResponse
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.oauth2.generate_url(\*\*params) -> Oauth2GenerateURLResponse
-# Embeddings
+#### Dropbox
Types:
```python
-from digitalocean_genai_sdk.types import EmbeddingCreateResponse
+from gradient.types.agents.evaluation_metrics.oauth2 import DropboxCreateTokensResponse
```
Methods:
-- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse
+- client.agents.evaluation_metrics.oauth2.dropbox.create_tokens(\*\*params) -> DropboxCreateTokensResponse
-# Files
+### ScheduledIndexing
Types:
```python
-from digitalocean_genai_sdk.types import (
- OpenAIFile,
- FileListResponse,
- FileDeleteResponse,
- FileRetrieveContentResponse,
+from gradient.types.agents.evaluation_metrics import (
+ ScheduledIndexingCreateResponse,
+ ScheduledIndexingRetrieveResponse,
+ ScheduledIndexingDeleteResponse,
)
```
Methods:
-- client.files.retrieve(file_id) -> OpenAIFile
-- client.files.list(\*\*params) -> FileListResponse
-- client.files.delete(file_id) -> FileDeleteResponse
-- client.files.retrieve_content(file_id) -> str
-- client.files.upload(\*\*params) -> OpenAIFile
+- client.agents.evaluation_metrics.scheduled_indexing.create(\*\*params) -> ScheduledIndexingCreateResponse
+- client.agents.evaluation_metrics.scheduled_indexing.retrieve(knowledge_base_uuid) -> ScheduledIndexingRetrieveResponse
+- client.agents.evaluation_metrics.scheduled_indexing.delete(uuid) -> ScheduledIndexingDeleteResponse
+
+## EvaluationRuns
-# FineTuning
+Types:
+
+```python
+from gradient.types.agents import (
+ APIEvaluationMetric,
+ APIEvaluationMetricResult,
+ APIEvaluationPrompt,
+ APIEvaluationRun,
+ EvaluationRunCreateResponse,
+ EvaluationRunRetrieveResponse,
+ EvaluationRunListResultsResponse,
+ EvaluationRunRetrieveResultsResponse,
+)
+```
+
+Methods:
-## Checkpoints
+- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse
+- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
-### Permissions
+## EvaluationTestCases
Types:
```python
-from digitalocean_genai_sdk.types.fine_tuning.checkpoints import (
- ListFineTuningCheckpointPermission,
- PermissionDeleteResponse,
+from gradient.types.agents import (
+ APIEvaluationTestCase,
+ APIStarMetric,
+ EvaluationTestCaseCreateResponse,
+ EvaluationTestCaseRetrieveResponse,
+ EvaluationTestCaseUpdateResponse,
+ EvaluationTestCaseListResponse,
+ EvaluationTestCaseListEvaluationRunsResponse,
)
```
Methods:
-- client.fine_tuning.checkpoints.permissions.create(permission_id, \*\*params) -> ListFineTuningCheckpointPermission
-- client.fine_tuning.checkpoints.permissions.retrieve(permission_id, \*\*params) -> ListFineTuningCheckpointPermission
-- client.fine_tuning.checkpoints.permissions.delete(permission_id) -> PermissionDeleteResponse
+- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
+- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
-## Jobs
+## EvaluationDatasets
Types:
```python
-from digitalocean_genai_sdk.types.fine_tuning import FineTuneMethod, FineTuningJob, JobListResponse
+from gradient.types.agents import (
+ EvaluationDatasetCreateResponse,
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+
+## Functions
+
+Types:
+
+```python
+from gradient.types.agents import (
+ FunctionCreateResponse,
+ FunctionUpdateResponse,
+ FunctionDeleteResponse,
+)
+```
+
+Methods:
+
+- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+
+## Versions
+
+Types:
+
+```python
+from gradient.types.agents import VersionUpdateResponse, VersionListResponse
```
Methods:
-- client.fine_tuning.jobs.create(\*\*params) -> FineTuningJob
-- client.fine_tuning.jobs.retrieve(fine_tuning_job_id) -> FineTuningJob
-- client.fine_tuning.jobs.list(\*\*params) -> JobListResponse
-- client.fine_tuning.jobs.cancel(fine_tuning_job_id) -> FineTuningJob
+- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
-### Checkpoints
+## KnowledgeBases
Types:
```python
-from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse
+from gradient.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
```
Methods:
-- client.fine_tuning.jobs.checkpoints.retrieve(fine_tuning_job_id, \*\*params) -> CheckpointRetrieveResponse
+- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
-### Events
+## Routes
Types:
```python
-from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse
+from gradient.types.agents import (
+ RouteUpdateResponse,
+ RouteDeleteResponse,
+ RouteAddResponse,
+ RouteViewResponse,
+)
```
Methods:
-- client.fine_tuning.jobs.events.retrieve(fine_tuning_job_id, \*\*params) -> EventRetrieveResponse
+- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
+- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
+- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
+- client.agents.routes.view(uuid) -> RouteViewResponse
+
+# Chat
+
+## Completions
+
+Types:
+
+```python
+from gradient.types.chat import CompletionCreateResponse
+```
+
+Methods:
+
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
# Images
Types:
```python
-from digitalocean_genai_sdk.types import ImagesResponse
+from gradient.types import ImageGenerateResponse
```
Methods:
-- client.images.create_edit(\*\*params) -> ImagesResponse
-- client.images.create_generation(\*\*params) -> ImagesResponse
-- client.images.create_variation(\*\*params) -> ImagesResponse
+- client.images.generate(\*\*params) -> ImageGenerateResponse
-# Models
+# Responses
+
+Methods:
+
+- client.responses.create(\*\*params) -> CreateResponseResponse
+
+# GPUDroplets
Types:
```python
-from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse
+from gradient.types import (
+ DropletBackupPolicy,
+ GPUDropletCreateResponse,
+ GPUDropletRetrieveResponse,
+ GPUDropletListResponse,
+ GPUDropletListFirewallsResponse,
+ GPUDropletListKernelsResponse,
+ GPUDropletListNeighborsResponse,
+ GPUDropletListSnapshotsResponse,
+)
```
Methods:
-- client.models.retrieve(model) -> Model
-- client.models.list() -> ModelListResponse
-- client.models.delete(model) -> ModelDeleteResponse
+- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse
+- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse
+- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse
+- client.gpu_droplets.delete(droplet_id) -> None
+- client.gpu_droplets.delete_by_tag(\*\*params) -> None
+- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse
+- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse
+- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse
+- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse
-# Moderations
+## Backups
Types:
```python
-from digitalocean_genai_sdk.types import ModerationClassifyResponse
+from gradient.types.gpu_droplets import (
+ BackupListResponse,
+ BackupListPoliciesResponse,
+ BackupListSupportedPoliciesResponse,
+ BackupRetrievePolicyResponse,
+)
```
Methods:
-- client.moderations.classify(\*\*params) -> ModerationClassifyResponse
+- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse
+- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse
+- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse
+- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse
-# Organization
+## Actions
Types:
```python
-from digitalocean_genai_sdk.types import (
- AuditLogActorUser,
- AuditLogEventType,
- UsageResponse,
- OrganizationListAuditLogsResponse,
+from gradient.types.gpu_droplets import (
+ ActionRetrieveResponse,
+ ActionListResponse,
+ ActionBulkInitiateResponse,
+ ActionInitiateResponse,
)
```
Methods:
-- client.organization.get_costs(\*\*params) -> UsageResponse
-- client.organization.list_audit_logs(\*\*params) -> OrganizationListAuditLogsResponse
+- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse
+- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse
+- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse
+- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse
-## AdminAPIKeys
+## DestroyWithAssociatedResources
Types:
```python
-from digitalocean_genai_sdk.types.organization import (
- AdminAPIKey,
- AdminAPIKeyListResponse,
- AdminAPIKeyDeleteResponse,
+from gradient.types.gpu_droplets import (
+ AssociatedResource,
+ DestroyedAssociatedResource,
+ DestroyWithAssociatedResourceListResponse,
+ DestroyWithAssociatedResourceCheckStatusResponse,
)
```
Methods:
-- client.organization.admin_api_keys.create(\*\*params) -> AdminAPIKey
-- client.organization.admin_api_keys.retrieve(key_id) -> AdminAPIKey
-- client.organization.admin_api_keys.list(\*\*params) -> AdminAPIKeyListResponse
-- client.organization.admin_api_keys.delete(key_id) -> AdminAPIKeyDeleteResponse
+- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse
+- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse
+- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None
+- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None
+- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None
-## Invites
+## Autoscale
Types:
```python
-from digitalocean_genai_sdk.types.organization import (
- Invite,
- InviteListResponse,
- InviteDeleteResponse,
+from gradient.types.gpu_droplets import (
+ AutoscalePool,
+ AutoscalePoolDropletTemplate,
+ AutoscalePoolDynamicConfig,
+ AutoscalePoolStaticConfig,
+ CurrentUtilization,
+ AutoscaleCreateResponse,
+ AutoscaleRetrieveResponse,
+ AutoscaleUpdateResponse,
+ AutoscaleListResponse,
+ AutoscaleListHistoryResponse,
+ AutoscaleListMembersResponse,
)
```
Methods:
-- client.organization.invites.create(\*\*params) -> Invite
-- client.organization.invites.retrieve(invite_id) -> Invite
-- client.organization.invites.list(\*\*params) -> InviteListResponse
-- client.organization.invites.delete(invite_id) -> InviteDeleteResponse
+- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse
+- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse
+- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse
+- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse
+- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse
+- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse
-## Projects
+## Firewalls
Types:
```python
-from digitalocean_genai_sdk.types.organization import Project, ProjectListResponse
+from gradient.types.gpu_droplets import (
+ Firewall,
+ FirewallCreateResponse,
+ FirewallRetrieveResponse,
+ FirewallUpdateResponse,
+ FirewallListResponse,
+)
```
Methods:
-- client.organization.projects.create(\*\*params) -> Project
-- client.organization.projects.retrieve(project_id) -> Project
-- client.organization.projects.update(project_id, \*\*params) -> Project
-- client.organization.projects.list(\*\*params) -> ProjectListResponse
-- client.organization.projects.archive(project_id) -> Project
+- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse
+- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse
+- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse
+- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse
+- client.gpu_droplets.firewalls.delete(firewall_id) -> None
-### APIKeys
+### Droplets
+
+Methods:
+
+- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None
+
+### Tags
+
+Methods:
+
+- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None
+
+### Rules
+
+Methods:
+
+- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None
+
+## FloatingIPs
Types:
```python
-from digitalocean_genai_sdk.types.organization.projects import (
- APIKey,
- APIKeyListResponse,
- APIKeyDeleteResponse,
+from gradient.types.gpu_droplets import (
+ FloatingIP,
+ FloatingIPCreateResponse,
+ FloatingIPRetrieveResponse,
+ FloatingIPListResponse,
)
```
Methods:
-- client.organization.projects.api_keys.retrieve(key_id, \*, project_id) -> APIKey
-- client.organization.projects.api_keys.list(project_id, \*\*params) -> APIKeyListResponse
-- client.organization.projects.api_keys.delete(key_id, \*, project_id) -> APIKeyDeleteResponse
+- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse
+- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse
+- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse
+- client.gpu_droplets.floating_ips.delete(floating_ip) -> None
-### RateLimits
+### Actions
Types:
```python
-from digitalocean_genai_sdk.types.organization.projects import RateLimit, RateLimitListResponse
+from gradient.types.gpu_droplets.floating_ips import (
+ ActionCreateResponse,
+ ActionRetrieveResponse,
+ ActionListResponse,
+)
```
Methods:
-- client.organization.projects.rate_limits.update(rate_limit_id, \*, project_id, \*\*params) -> RateLimit
-- client.organization.projects.rate_limits.list(project_id, \*\*params) -> RateLimitListResponse
+- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse
+- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse
+- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse
-### ServiceAccounts
+## Images
Types:
```python
-from digitalocean_genai_sdk.types.organization.projects import (
- ServiceAccount,
- ServiceAccountCreateResponse,
- ServiceAccountListResponse,
- ServiceAccountDeleteResponse,
+from gradient.types.gpu_droplets import (
+ ImageCreateResponse,
+ ImageRetrieveResponse,
+ ImageUpdateResponse,
+ ImageListResponse,
)
```
Methods:
-- client.organization.projects.service_accounts.create(project_id, \*\*params) -> ServiceAccountCreateResponse
-- client.organization.projects.service_accounts.retrieve(service_account_id, \*, project_id) -> ServiceAccount
-- client.organization.projects.service_accounts.list(project_id, \*\*params) -> ServiceAccountListResponse
-- client.organization.projects.service_accounts.delete(service_account_id, \*, project_id) -> ServiceAccountDeleteResponse
+- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse
+- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse
+- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse
+- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse
+- client.gpu_droplets.images.delete(image_id) -> None
+
+### Actions
+
+Types:
+
+```python
+from gradient.types.gpu_droplets.images import ActionListResponse
+```
+
+Methods:
+
+- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action
+- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action
+- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse
-### Users
+## LoadBalancers
Types:
```python
-from digitalocean_genai_sdk.types.organization.projects import (
- ProjectUser,
- UserListResponse,
- UserDeleteResponse,
+from gradient.types.gpu_droplets import (
+ Domains,
+ ForwardingRule,
+ GlbSettings,
+ HealthCheck,
+ LbFirewall,
+ LoadBalancer,
+ StickySessions,
+ LoadBalancerCreateResponse,
+ LoadBalancerRetrieveResponse,
+ LoadBalancerUpdateResponse,
+ LoadBalancerListResponse,
)
```
Methods:
-- client.organization.projects.users.retrieve(user_id, \*, project_id) -> ProjectUser
-- client.organization.projects.users.update(user_id, \*, project_id, \*\*params) -> ProjectUser
-- client.organization.projects.users.list(project_id, \*\*params) -> UserListResponse
-- client.organization.projects.users.delete(user_id, \*, project_id) -> UserDeleteResponse
-- client.organization.projects.users.add(project_id, \*\*params) -> ProjectUser
+- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse
+- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse
+- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse
+- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse
+- client.gpu_droplets.load_balancers.delete(lb_id) -> None
+- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None
+
+### Droplets
+
+Methods:
+
+- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None
+
+### ForwardingRules
+
+Methods:
+
+- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None
+
+## Sizes
+
+Types:
-## Usage
+```python
+from gradient.types.gpu_droplets import SizeListResponse
+```
Methods:
-- client.organization.usage.audio_speeches(\*\*params) -> UsageResponse
-- client.organization.usage.audio_transcriptions(\*\*params) -> UsageResponse
-- client.organization.usage.code_interpreter_sessions(\*\*params) -> UsageResponse
-- client.organization.usage.completions(\*\*params) -> UsageResponse
-- client.organization.usage.embeddings(\*\*params) -> UsageResponse
-- client.organization.usage.images(\*\*params) -> UsageResponse
-- client.organization.usage.moderations(\*\*params) -> UsageResponse
-- client.organization.usage.vector_stores(\*\*params) -> UsageResponse
+- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse
-## Users
+## Snapshots
Types:
```python
-from digitalocean_genai_sdk.types.organization import (
- OrganizationUser,
- UserListResponse,
- UserDeleteResponse,
+from gradient.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse
+```
+
+Methods:
+
+- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
+- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse
+- client.gpu_droplets.snapshots.delete(snapshot_id) -> None
+
+## Volumes
+
+Types:
+
+```python
+from gradient.types.gpu_droplets import (
+ VolumeCreateResponse,
+ VolumeRetrieveResponse,
+ VolumeListResponse,
)
```
Methods:
-- client.organization.users.retrieve(user_id) -> OrganizationUser
-- client.organization.users.update(user_id, \*\*params) -> OrganizationUser
-- client.organization.users.list(\*\*params) -> UserListResponse
-- client.organization.users.delete(user_id) -> UserDeleteResponse
+- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse
+- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse
+- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse
+- client.gpu_droplets.volumes.delete(volume_id) -> None
+- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None
-# Realtime
+### Actions
Types:
```python
-from digitalocean_genai_sdk.types import (
- RealtimeCreateSessionResponse,
- RealtimeCreateTranscriptionSessionResponse,
+from gradient.types.gpu_droplets.volumes import (
+ VolumeAction,
+ ActionRetrieveResponse,
+ ActionListResponse,
+ ActionInitiateByIDResponse,
+ ActionInitiateByNameResponse,
)
```
Methods:
-- client.realtime.create_session(\*\*params) -> RealtimeCreateSessionResponse
-- client.realtime.create_transcription_session(\*\*params) -> RealtimeCreateTranscriptionSessionResponse
+- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse
+- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse
+- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse
+- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse
-# Responses
+### Snapshots
Types:
```python
-from digitalocean_genai_sdk.types import (
- ComputerToolCall,
- ComputerToolCallOutput,
- ComputerToolCallSafetyCheck,
- FileSearchToolCall,
- FunctionToolCall,
- FunctionToolCallOutput,
- Includable,
- InputContent,
- InputMessage,
- ModelResponseProperties,
- OutputMessage,
- ReasoningItem,
- Response,
- ResponseProperties,
- WebSearchToolCall,
- ResponseListInputItemsResponse,
+from gradient.types.gpu_droplets.volumes import (
+ SnapshotCreateResponse,
+ SnapshotRetrieveResponse,
+ SnapshotListResponse,
)
```
Methods:
-- client.responses.create(\*\*params) -> Response
-- client.responses.retrieve(response_id, \*\*params) -> Response
-- client.responses.delete(response_id) -> None
-- client.responses.list_input_items(response_id, \*\*params) -> ResponseListInputItemsResponse
+- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse
+- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
+- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse
+- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None
+
+## Account
-# Threads
+### Keys
Types:
```python
-from digitalocean_genai_sdk.types import CreateThreadRequest, ThreadObject, ThreadDeleteResponse
+from gradient.types.gpu_droplets.account import (
+ SSHKeys,
+ KeyCreateResponse,
+ KeyRetrieveResponse,
+ KeyUpdateResponse,
+ KeyListResponse,
+)
```
Methods:
-- client.threads.create(\*\*params) -> ThreadObject
-- client.threads.retrieve(thread_id) -> ThreadObject
-- client.threads.update(thread_id, \*\*params) -> ThreadObject
-- client.threads.delete(thread_id) -> ThreadDeleteResponse
+- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse
+- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse
+- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse
+- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse
+- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None
+
+# Inference
-## Runs
+## APIKeys
Types:
```python
-from digitalocean_genai_sdk.types.threads import (
- AssistantsAPIToolChoiceOption,
- RunObject,
- TruncationObject,
- RunListResponse,
+from gradient.types.inference import (
+ APIModelAPIKeyInfo,
+ APIKeyCreateResponse,
+ APIKeyUpdateResponse,
+ APIKeyListResponse,
+ APIKeyDeleteResponse,
+ APIKeyUpdateRegenerateResponse,
)
```
Methods:
-- client.threads.runs.create(\*\*params) -> RunObject
-- client.threads.runs.retrieve(run_id, \*, thread_id) -> RunObject
-- client.threads.runs.update(run_id, \*, thread_id, \*\*params) -> RunObject
-- client.threads.runs.list(thread_id, \*\*params) -> RunListResponse
-- client.threads.runs.cancel(run_id, \*, thread_id) -> RunObject
-- client.threads.runs.create_run(thread_id, \*\*params) -> RunObject
-- client.threads.runs.submit_tool_outputs(run_id, \*, thread_id, \*\*params) -> RunObject
+- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
+- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
+- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
+- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
-### Steps
+# KnowledgeBases
Types:
```python
-from digitalocean_genai_sdk.types.threads.runs import RunStepObject, StepListResponse
+from gradient.types import (
+ APIKnowledgeBase,
+ KnowledgeBaseCreateResponse,
+ KnowledgeBaseRetrieveResponse,
+ KnowledgeBaseUpdateResponse,
+ KnowledgeBaseListResponse,
+ KnowledgeBaseDeleteResponse,
+ KnowledgeBaseListIndexingJobsResponse,
+)
```
Methods:
-- client.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id, \*\*params) -> RunStepObject
-- client.threads.runs.steps.list(run_id, \*, thread_id, \*\*params) -> StepListResponse
+- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
+- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
+- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
+- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
+- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
+- client.knowledge_bases.list_indexing_jobs(knowledge_base_uuid) -> KnowledgeBaseListIndexingJobsResponse
-## Messages
+## DataSources
Types:
```python
-from digitalocean_genai_sdk.types.threads import (
- AssistantToolsFileSearchTypeOnly,
- CreateMessageRequest,
- MessageContentImageFileObject,
- MessageContentImageURLObject,
- MessageObject,
- MessageListResponse,
- MessageDeleteResponse,
+from gradient.types.knowledge_bases import (
+ APIFileUploadDataSource,
+ APIKnowledgeBaseDataSource,
+ APISpacesDataSource,
+ APIWebCrawlerDataSource,
+ AwsDataSource,
+ DataSourceCreateResponse,
+ DataSourceUpdateResponse,
+ DataSourceListResponse,
+ DataSourceDeleteResponse,
+ DataSourceCreatePresignedURLsResponse,
)
```
Methods:
-- client.threads.messages.create(thread_id, \*\*params) -> MessageObject
-- client.threads.messages.retrieve(message_id, \*, thread_id) -> MessageObject
-- client.threads.messages.update(message_id, \*, thread_id, \*\*params) -> MessageObject
-- client.threads.messages.list(thread_id, \*\*params) -> MessageListResponse
-- client.threads.messages.delete(message_id, \*, thread_id) -> MessageDeleteResponse
+- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
+- client.knowledge_bases.data_sources.update(path_data_source_uuid, \*, path_knowledge_base_uuid, \*\*params) -> DataSourceUpdateResponse
+- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
+- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
+- client.knowledge_bases.data_sources.create_presigned_urls(\*\*params) -> DataSourceCreatePresignedURLsResponse
+
+## IndexingJobs
+
+Types:
+
+```python
+from gradient.types.knowledge_bases import (
+ APIIndexedDataSource,
+ APIIndexingJob,
+ IndexingJobCreateResponse,
+ IndexingJobRetrieveResponse,
+ IndexingJobListResponse,
+ IndexingJobRetrieveDataSourcesResponse,
+ IndexingJobRetrieveSignedURLResponse,
+ IndexingJobUpdateCancelResponse,
+)
+```
+
+Methods:
+
+- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
+- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
+- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
+- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
+- client.knowledge_bases.indexing_jobs.retrieve_signed_url(indexing_job_uuid) -> IndexingJobRetrieveSignedURLResponse
+- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
+
+# Models
+
+Types:
+
+```python
+from gradient.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse
+```
+
+Methods:
+
+- client.models.list(\*\*params) -> ModelListResponse
+
+## Providers
-# Uploads
+### Anthropic
Types:
```python
-from digitalocean_genai_sdk.types import Upload, UploadAddPartResponse
+from gradient.types.models.providers import (
+ AnthropicCreateResponse,
+ AnthropicRetrieveResponse,
+ AnthropicUpdateResponse,
+ AnthropicListResponse,
+ AnthropicDeleteResponse,
+ AnthropicListAgentsResponse,
+)
```
Methods:
-- client.uploads.create(\*\*params) -> Upload
-- client.uploads.add_part(upload_id, \*\*params) -> UploadAddPartResponse
-- client.uploads.cancel(upload_id) -> Upload
-- client.uploads.complete(upload_id, \*\*params) -> Upload
+- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse
+- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse
+- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse
+- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse
+- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse
+- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse
-# VectorStores
+### OpenAI
Types:
```python
-from digitalocean_genai_sdk.types import (
- AutoChunkingStrategyRequestParam,
- ComparisonFilter,
- CompoundFilter,
- StaticChunkingStrategy,
- StaticChunkingStrategyRequestParam,
- VectorStoreExpirationAfter,
- VectorStoreObject,
- VectorStoreListResponse,
- VectorStoreDeleteResponse,
- VectorStoreSearchResponse,
+from gradient.types.models.providers import (
+ OpenAICreateResponse,
+ OpenAIRetrieveResponse,
+ OpenAIUpdateResponse,
+ OpenAIListResponse,
+ OpenAIDeleteResponse,
+ OpenAIRetrieveAgentsResponse,
)
```
Methods:
-- client.vector_stores.create(\*\*params) -> VectorStoreObject
-- client.vector_stores.retrieve(vector_store_id) -> VectorStoreObject
-- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStoreObject
-- client.vector_stores.list(\*\*params) -> VectorStoreListResponse
-- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleteResponse
-- client.vector_stores.search(vector_store_id, \*\*params) -> VectorStoreSearchResponse
+- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse
+- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse
+- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse
+- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse
+- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse
+- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse
+
+# Regions
+
+Types:
+
+```python
+from gradient.types import RegionListResponse
+```
+
+Methods:
+
+- client.regions.list(\*\*params) -> RegionListResponse
+
+# Databases
+
+## SchemaRegistry
-## FileBatches
+### Config
Types:
```python
-from digitalocean_genai_sdk.types.vector_stores import (
- ChunkingStrategyRequestParam,
- ListVectorStoreFilesResponse,
- VectorStoreFileBatchObject,
+from gradient.types.databases.schema_registry import (
+ ConfigRetrieveResponse,
+ ConfigUpdateResponse,
+ ConfigRetrieveSubjectResponse,
+ ConfigUpdateSubjectResponse,
)
```
Methods:
-- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatchObject
-- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject
-- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject
-- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> ListVectorStoreFilesResponse
+- client.databases.schema_registry.config.retrieve(database_cluster_uuid) -> ConfigRetrieveResponse
+- client.databases.schema_registry.config.update(database_cluster_uuid, \*\*params) -> ConfigUpdateResponse
+- client.databases.schema_registry.config.retrieve_subject(subject_name, \*, database_cluster_uuid) -> ConfigRetrieveSubjectResponse
+- client.databases.schema_registry.config.update_subject(subject_name, \*, database_cluster_uuid, \*\*params) -> ConfigUpdateSubjectResponse
-## Files
+# Nfs
Types:
```python
-from digitalocean_genai_sdk.types.vector_stores import (
- VectorStoreFileObject,
- FileDeleteResponse,
- FileRetrieveContentResponse,
+from gradient.types import (
+ NfCreateResponse,
+ NfRetrieveResponse,
+ NfListResponse,
+ NfInitiateActionResponse,
)
```
Methods:
-- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFileObject
-- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFileObject
-- client.vector_stores.files.update(file_id, \*, vector_store_id, \*\*params) -> VectorStoreFileObject
-- client.vector_stores.files.list(vector_store_id, \*\*params) -> ListVectorStoreFilesResponse
-- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> FileDeleteResponse
-- client.vector_stores.files.retrieve_content(file_id, \*, vector_store_id) -> FileRetrieveContentResponse
+- client.nfs.create(\*\*params) -> NfCreateResponse
+- client.nfs.retrieve(nfs_id, \*\*params) -> NfRetrieveResponse
+- client.nfs.list(\*\*params) -> NfListResponse
+- client.nfs.delete(nfs_id, \*\*params) -> None
+- client.nfs.initiate_action(nfs_id, \*\*params) -> NfInitiateActionResponse
+
+## Snapshots
+
+Types:
+
+```python
+from gradient.types.nfs import SnapshotRetrieveResponse, SnapshotListResponse
+```
+
+Methods:
+
+- client.nfs.snapshots.retrieve(nfs_snapshot_id, \*\*params) -> SnapshotRetrieveResponse
+- client.nfs.snapshots.list(\*\*params) -> SnapshotListResponse
+- client.nfs.snapshots.delete(nfs_snapshot_id, \*\*params) -> None
+
+# Retrieve
+
+Types:
+
+```python
+from gradient.types import RetrieveDocumentsResponse
+```
+
+Methods:
+
+- client.retrieve.documents(knowledge_base_id, \*\*params) -> RetrieveDocumentsResponse
+
+# Apps
+
+## JobInvocations
+
+Types:
+
+```python
+from gradient.types.apps import JobInvocationCancelResponse
+```
+
+Methods:
+
+- client.apps.job_invocations.cancel(job_invocation_id, \*, app_id, \*\*params) -> JobInvocationCancelResponse
+
+# Billing
+
+Types:
+
+```python
+from gradient.types import BillingListInsightsResponse
+```
+
+Methods:
+
+- client.billing.list_insights(end_date, \*, account_urn, start_date, \*\*params) -> BillingListInsightsResponse
diff --git a/bin/check-release-environment b/bin/check-release-environment
new file mode 100644
index 00000000..b845b0f4
--- /dev/null
+++ b/bin/check-release-environment
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+errors=()
+
+if [ -z "${PYPI_TOKEN}" ]; then
+ errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
+fi
+
+lenErrors=${#errors[@]}
+
+if [[ lenErrors -gt 0 ]]; then
+ echo -e "Found the following errors in the release environment:\n"
+
+ for error in "${errors[@]}"; do
+ echo -e "- $error\n"
+ done
+
+ exit 1
+fi
+
+echo "The environment is ready to push releases!"
diff --git a/examples/agent_wait_until_ready.py b/examples/agent_wait_until_ready.py
new file mode 100644
index 00000000..6d14494c
--- /dev/null
+++ b/examples/agent_wait_until_ready.py
@@ -0,0 +1,99 @@
+"""
+Example: Wait for Agent Deployment to Complete
+
+This example demonstrates how to use the wait_until_ready() method to wait for
+an agent to finish deploying before using it.
+"""
+
+from gradient import Gradient, AgentDeploymentError, AgentDeploymentTimeoutError
+
+# Initialize the Gradient client
+client = Gradient()
+
+# Create a new agent
+agent_response = client.agents.create(
+ name="My Agent",
+ instruction="You are a helpful assistant",
+ model_uuid="",
+ region="nyc1",
+)
+
+agent_id = agent_response.agent.uuid if agent_response.agent else None
+
+if agent_id:
+ print(f"Agent created with ID: {agent_id}")
+ print("Waiting for agent to be ready...")
+
+ try:
+ # Wait for the agent to be deployed and ready
+ # This will poll the agent status every 5 seconds (default)
+ # and wait up to 5 minutes (default timeout=300 seconds)
+ ready_agent = client.agents.wait_until_ready(
+ agent_id,
+ poll_interval=5.0, # Check every 5 seconds
+ timeout=300.0, # Wait up to 5 minutes
+ )
+
+ if ready_agent.agent and ready_agent.agent.deployment:
+ print(f"Agent is ready! Status: {ready_agent.agent.deployment.status}")
+ print(f"Agent URL: {ready_agent.agent.url}")
+
+ # Now you can use the agent
+ # ...
+
+ except AgentDeploymentError as e:
+ print(f"Agent deployment failed: {e}")
+ print(f"Failed status: {e.status}")
+
+ except AgentDeploymentTimeoutError as e:
+ print(f"Agent deployment timed out: {e}")
+ print(f"Agent ID: {e.agent_id}")
+
+ except Exception as e:
+ print(f"Unexpected error: {e}")
+
+
+# Async example
+from gradient import AsyncGradient
+
+
+async def main() -> None:
+ async_client = AsyncGradient()
+
+ # Create a new agent
+ agent_response = await async_client.agents.create(
+ name="My Async Agent",
+ instruction="You are a helpful assistant",
+ model_uuid="",
+ region="nyc1",
+ )
+
+ agent_id = agent_response.agent.uuid if agent_response.agent else None
+
+ if agent_id:
+ print(f"Agent created with ID: {agent_id}")
+ print("Waiting for agent to be ready...")
+
+ try:
+ # Wait for the agent to be deployed and ready (async)
+ ready_agent = await async_client.agents.wait_until_ready(
+ agent_id,
+ poll_interval=5.0,
+ timeout=300.0,
+ )
+
+ if ready_agent.agent and ready_agent.agent.deployment:
+ print(f"Agent is ready! Status: {ready_agent.agent.deployment.status}")
+ print(f"Agent URL: {ready_agent.agent.url}")
+
+ except AgentDeploymentError as e:
+ print(f"Agent deployment failed: {e}")
+ print(f"Failed status: {e.status}")
+
+ except AgentDeploymentTimeoutError as e:
+ print(f"Agent deployment timed out: {e}")
+ print(f"Agent ID: {e.agent_id}")
+
+
+# Uncomment to run the async example (requires importing asyncio first):
+# import asyncio; asyncio.run(main())
diff --git a/examples/knowledge_base_indexing_wait.py b/examples/knowledge_base_indexing_wait.py
new file mode 100644
index 00000000..94550ee2
--- /dev/null
+++ b/examples/knowledge_base_indexing_wait.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+"""
+Example: Waiting for Knowledge Base Indexing Job Completion
+
+This example demonstrates how to use the wait_for_completion() method
+to automatically wait for a knowledge base indexing job to finish,
+without needing to write manual polling loops.
+"""
+
+import os
+
+from gradient import Gradient, IndexingJobError, IndexingJobTimeoutError
+
+
+def main() -> None:
+ # Initialize the Gradient client
+ client = Gradient()
+
+ # Example 1: Basic usage - wait for indexing job to complete
+ print("Example 1: Basic usage")
+ print("-" * 50)
+
+ # Create an indexing job (replace with your actual knowledge base UUID)
+ knowledge_base_uuid = os.getenv("KNOWLEDGE_BASE_UUID", "your-kb-uuid-here")
+
+ print(f"Creating indexing job for knowledge base: {knowledge_base_uuid}")
+ indexing_job = client.knowledge_bases.indexing_jobs.create(
+ knowledge_base_uuid=knowledge_base_uuid,
+ )
+
+ job_uuid = indexing_job.job.uuid if indexing_job.job else None
+ if not job_uuid:
+ print("Error: Could not create indexing job")
+ return
+
+ print(f"Indexing job created with UUID: {job_uuid}")
+ print("Waiting for indexing job to complete...")
+
+ try:
+ # Wait for the job to complete (polls every 5 seconds by default)
+ completed_job = client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+
+ print("\n✅ Indexing job completed successfully!")
+ if completed_job.job:
+ print(f"Phase: {completed_job.job.phase}")
+ print(f"Total datasources: {completed_job.job.total_datasources}")
+ print(f"Completed datasources: {completed_job.job.completed_datasources}")
+
+ except IndexingJobTimeoutError as e:
+ print(f"\n⏱️ Timeout: {e}")
+ except IndexingJobError as e:
+ print(f"\n❌ Error: {e}")
+ except Exception as e:
+ print(f"\n❌ Unexpected error: {e}")
+
+
+def example_with_custom_polling() -> None:
+ """Example with custom polling interval and timeout"""
+ print("\n\nExample 2: Custom polling interval and timeout")
+ print("-" * 50)
+
+ client = Gradient()
+ knowledge_base_uuid = os.getenv("KNOWLEDGE_BASE_UUID", "your-kb-uuid-here")
+
+ print(f"Creating indexing job for knowledge base: {knowledge_base_uuid}")
+ indexing_job = client.knowledge_bases.indexing_jobs.create(
+ knowledge_base_uuid=knowledge_base_uuid,
+ )
+
+ job_uuid = indexing_job.job.uuid if indexing_job.job else None
+ if not job_uuid:
+ print("Error: Could not create indexing job")
+ return
+
+ print(f"Indexing job created with UUID: {job_uuid}")
+ print("Waiting for indexing job to complete (polling every 10 seconds, 5 minute timeout)...")
+
+ try:
+ # Wait with custom poll interval (10 seconds) and timeout (5 minutes = 300 seconds)
+ completed_job = client.knowledge_bases.indexing_jobs.wait_for_completion(
+ job_uuid,
+ poll_interval=10, # Poll every 10 seconds
+ timeout=300, # Timeout after 5 minutes
+ )
+
+ print("\n✅ Indexing job completed successfully!")
+ if completed_job.job:
+ print(f"Phase: {completed_job.job.phase}")
+
+ except IndexingJobTimeoutError:
+ print("\n⏱️ Job did not complete within 5 minutes")
+ # You can still check the current status
+ current_status = client.knowledge_bases.indexing_jobs.retrieve(job_uuid)
+ if current_status.job:
+ print(f"Current phase: {current_status.job.phase}")
+ print(
+ f"Completed datasources: {current_status.job.completed_datasources}/{current_status.job.total_datasources}"
+ )
+ except IndexingJobError as e:
+ print(f"\n❌ Job failed: {e}")
+
+
+def example_manual_polling() -> None:
+ """Example of the old manual polling approach (for comparison)"""
+ print("\n\nExample 3: Manual polling (old approach)")
+ print("-" * 50)
+
+ client = Gradient()
+ knowledge_base_uuid = os.getenv("KNOWLEDGE_BASE_UUID", "your-kb-uuid-here")
+
+ indexing_job = client.knowledge_bases.indexing_jobs.create(
+ knowledge_base_uuid=knowledge_base_uuid,
+ )
+
+ job_uuid = indexing_job.job.uuid if indexing_job.job else None
+ if not job_uuid:
+ print("Error: Could not create indexing job")
+ return
+
+ print(f"Indexing job created with UUID: {job_uuid}")
+ print("Manual polling (old approach)...")
+
+ import time
+
+ while True:
+ indexing_job_status = client.knowledge_bases.indexing_jobs.retrieve(job_uuid)
+
+ if indexing_job_status.job and indexing_job_status.job.phase:
+ phase = indexing_job_status.job.phase
+ print(f"Current phase: {phase}")
+
+ if phase in ["BATCH_JOB_PHASE_UNKNOWN", "BATCH_JOB_PHASE_PENDING", "BATCH_JOB_PHASE_RUNNING"]:
+ time.sleep(5)
+ continue
+ elif phase == "BATCH_JOB_PHASE_SUCCEEDED":
+ print("✅ Job completed successfully!")
+ break
+ else:
+ print(f"❌ Job ended with phase: {phase}")
+ break
+
+
+async def example_async() -> None:
+ """Example using async/await"""
+ print("\n\nExample 4: Async usage")
+ print("-" * 50)
+
+ from gradient import AsyncGradient
+
+ client = AsyncGradient()
+ knowledge_base_uuid = os.getenv("KNOWLEDGE_BASE_UUID", "your-kb-uuid-here")
+
+ print(f"Creating indexing job for knowledge base: {knowledge_base_uuid}")
+ indexing_job = await client.knowledge_bases.indexing_jobs.create(
+ knowledge_base_uuid=knowledge_base_uuid,
+ )
+
+ job_uuid = indexing_job.job.uuid if indexing_job.job else None
+ if not job_uuid:
+ print("Error: Could not create indexing job")
+ return
+
+ print(f"Indexing job created with UUID: {job_uuid}")
+ print("Waiting for indexing job to complete (async)...")
+
+ try:
+ completed_job = await client.knowledge_bases.indexing_jobs.wait_for_completion(
+ job_uuid,
+ poll_interval=5,
+ timeout=600, # 10 minute timeout
+ )
+
+ print("\n✅ Indexing job completed successfully!")
+ if completed_job.job:
+ print(f"Phase: {completed_job.job.phase}")
+
+ except IndexingJobTimeoutError as e:
+ print(f"\n⏱️ Timeout: {e}")
+ except IndexingJobError as e:
+ print(f"\n❌ Error: {e}")
+ finally:
+ await client.close()
+
+
+if __name__ == "__main__":
+ # Run the basic example
+ main()
+
+ # Uncomment to run other examples:
+ # example_with_custom_polling()
+ # example_manual_polling()
+
+ # For async example, you would need to run:
+ # import asyncio
+ # asyncio.run(example_async())
diff --git a/examples/wait_for_knowledge_base.py b/examples/wait_for_knowledge_base.py
new file mode 100644
index 00000000..739ff80e
--- /dev/null
+++ b/examples/wait_for_knowledge_base.py
@@ -0,0 +1,64 @@
+"""
+Example demonstrating how to use the wait_for_database helper function.
+
+This example shows how to:
+1. Create a knowledge base
+2. Wait for its database to be ready
+3. Handle errors and timeouts appropriately
+"""
+
+import os
+
+from gradient import Gradient
+from gradient.resources.knowledge_bases import KnowledgeBaseTimeoutError, KnowledgeBaseDatabaseError
+
+
+def main() -> None:
+ """Create a knowledge base and wait for its database to be ready."""
+ # Initialize the Gradient client
+ # Note: DIGITALOCEAN_ACCESS_TOKEN must be set in your environment
+ client = Gradient(
+ access_token=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"),
+ )
+
+ # Create a knowledge base
+ # Replace these values with your actual configuration
+ kb_response = client.knowledge_bases.create(
+ name="My Knowledge Base",
+ region="nyc1", # Choose your preferred region
+ embedding_model_uuid="your-embedding-model-uuid", # Use your embedding model UUID
+ )
+
+ if not kb_response.knowledge_base or not kb_response.knowledge_base.uuid:
+ print("Failed to create knowledge base")
+ return
+
+ kb_uuid = kb_response.knowledge_base.uuid
+ print(f"Created knowledge base: {kb_uuid}")
+
+ try:
+ # Wait for the database to be ready
+ # Default: 10 minute timeout, 5 second poll interval
+ print("Waiting for database to be ready...")
+ result = client.knowledge_bases.wait_for_database(kb_uuid)
+ print(f"Database status: {result.database_status}") # "ONLINE"
+ print("Knowledge base is ready!")
+
+ # Alternative: Custom timeout and poll interval
+ # result = client.knowledge_bases.wait_for_database(
+ # kb_uuid,
+ # timeout=900.0, # 15 minutes
+ # poll_interval=10.0 # Check every 10 seconds
+ # )
+
+ except KnowledgeBaseDatabaseError as e:
+ # Database entered a failed state (DECOMMISSIONED or UNHEALTHY)
+ print(f"Database failed: {e}")
+
+ except KnowledgeBaseTimeoutError as e:
+ # Database did not become ready within the timeout period
+ print(f"Timeout: {e}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index 54f4282a..00000000
--- a/mypy.ini
+++ /dev/null
@@ -1,50 +0,0 @@
-[mypy]
-pretty = True
-show_error_codes = True
-
-# Exclude _files.py because mypy isn't smart enough to apply
-# the correct type narrowing and as this is an internal module
-# it's fine to just use Pyright.
-#
-# We also exclude our `tests` as mypy doesn't always infer
-# types correctly and Pyright will still catch any type errors.
-exclude = ^(src/digitalocean_genai_sdk/_files\.py|_dev/.*\.py|tests/.*)$
-
-strict_equality = True
-implicit_reexport = True
-check_untyped_defs = True
-no_implicit_optional = True
-
-warn_return_any = True
-warn_unreachable = True
-warn_unused_configs = True
-
-# Turn these options off as it could cause conflicts
-# with the Pyright options.
-warn_unused_ignores = False
-warn_redundant_casts = False
-
-disallow_any_generics = True
-disallow_untyped_defs = True
-disallow_untyped_calls = True
-disallow_subclassing_any = True
-disallow_incomplete_defs = True
-disallow_untyped_decorators = True
-cache_fine_grained = True
-
-# By default, mypy reports an error if you assign a value to the result
-# of a function call that doesn't return anything. We do this in our test
-# cases:
-# ```
-# result = ...
-# assert result is None
-# ```
-# Changing this codegen to make mypy happy would increase complexity
-# and would not be worth it.
-disable_error_code = func-returns-value,overload-cannot-match
-
-# https://github.com/python/mypy/issues/12162
-[mypy.overrides]
-module = "black.files.*"
-ignore_errors = true
-ignore_missing_imports = true
diff --git a/pyproject.toml b/pyproject.toml
index 33ffc05d..3a246099 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,59 +1,62 @@
[project]
-name = "digitalocean_genai_sdk"
-version = "0.0.1-alpha.0"
-description = "The official Python library for the digitalocean-genai-sdk API"
+name = "gradient"
+version = "3.12.0"
+description = "The official Python library for the Gradient API"
dynamic = ["readme"]
-license = "MIT"
-authors = [
-{ name = "Digitalocean Genai SDK", email = "" },
-]
+license = "Apache-2.0"
+authors = [{ name = "DigitalOcean, LLC", email = "dev@digitalocean.com" }]
+
dependencies = [
- "httpx>=0.23.0, <1",
- "pydantic>=1.9.0, <3",
- "typing-extensions>=4.10, <5",
- "anyio>=3.5.0, <5",
- "distro>=1.7.0, <2",
- "sniffio",
+ "httpx>=0.23.0, <1",
+ "pydantic>=1.9.0, <3",
+ "typing-extensions>=4.10, <5",
+ "anyio>=3.5.0, <5",
+ "distro>=1.7.0, <2",
+ "sniffio",
]
-requires-python = ">= 3.8"
+
+requires-python = ">= 3.9"
classifiers = [
"Typing :: Typed",
"Intended Audience :: Developers",
- "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Topic :: Software Development :: Libraries :: Python Modules",
- "License :: OSI Approved :: MIT License"
+ "License :: OSI Approved :: Apache Software License",
]
[project.urls]
-Homepage = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python"
-Repository = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python"
+Homepage = "https://github.com/digitalocean/gradient-python"
+Repository = "https://github.com/digitalocean/gradient-python"
+[project.optional-dependencies]
+aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.9"]
[tool.rye]
managed = true
# version pins are in requirements-dev.lock
dev-dependencies = [
- "pyright==1.1.399",
- "mypy",
- "respx",
- "pytest",
- "pytest-asyncio",
- "ruff",
- "time-machine",
- "nox",
- "dirty-equals>=0.6.0",
- "importlib-metadata>=6.7.0",
- "rich>=13.7.1",
- "nest_asyncio==1.6.0",
+ "pyright==1.1.399",
+ "mypy==1.17",
+ "respx",
+ "pytest",
+ "pytest-asyncio",
+ "ruff",
+ "time-machine",
+ "nox",
+ "dirty-equals>=0.6.0",
+ "importlib-metadata>=6.7.0",
+ "rich>=13.7.1",
+ "pytest-xdist>=3.6.1",
]
[tool.rye.scripts]
@@ -63,26 +66,19 @@ format = { chain = [
"fix:ruff",
# run formatting again to fix any inconsistencies when imports are stripped
"format:ruff",
-]}
+] }
"format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md"
"format:ruff" = "ruff format"
-"lint" = { chain = [
- "check:ruff",
- "typecheck",
- "check:importable",
-]}
+"lint" = { chain = ["check:ruff", "typecheck", "check:importable"] }
"check:ruff" = "ruff check ."
"fix:ruff" = "ruff check --fix ."
-"check:importable" = "python -c 'import digitalocean_genai_sdk'"
+"check:importable" = "python -c 'import gradient'"
-typecheck = { chain = [
- "typecheck:pyright",
- "typecheck:mypy"
-]}
+typecheck = { chain = ["typecheck:pyright", "typecheck:mypy"] }
"typecheck:pyright" = "pyright"
-"typecheck:verify-types" = "pyright --verifytypes digitalocean_genai_sdk --ignoreexternal"
+"typecheck:verify-types" = "pyright --verifytypes gradient --ignoreexternal"
"typecheck:mypy" = "mypy ."
[build-system]
@@ -90,12 +86,10 @@ requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"]
build-backend = "hatchling.build"
[tool.hatch.build]
-include = [
- "src/*"
-]
+include = ["src/*"]
[tool.hatch.build.targets.wheel]
-packages = ["src/digitalocean_genai_sdk"]
+packages = ["src/gradient"]
[tool.hatch.build.targets.sdist]
# Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc)
@@ -121,16 +115,17 @@ path = "README.md"
[[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]]
# replace relative links with absolute links
pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)'
-replacement = '[\1](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/\g<2>)'
+replacement = '[\1](https://github.com/digitalocean/gradient-python/tree/main/\g<2>)'
[tool.pytest.ini_options]
testpaths = ["tests"]
-addopts = "--tb=short"
+addopts = "--tb=short -n auto -m 'not smoke'"
xfail_strict = true
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "session"
-filterwarnings = [
- "error"
+filterwarnings = ["error"]
+markers = [
+ "smoke: lightweight external integration smoke tests hitting live Gradient services",
]
[tool.pyright]
@@ -138,12 +133,13 @@ filterwarnings = [
# there are a couple of flags that are still disabled by
# default in strict mode as they are experimental and niche.
typeCheckingMode = "strict"
-pythonVersion = "3.8"
+pythonVersion = "3.9"
exclude = [
"_dev",
".venv",
".nox",
+ ".git",
]
reportImplicitOverride = true
@@ -152,10 +148,62 @@ reportOverlappingOverload = false
reportImportCycles = false
reportPrivateUsage = false
+[tool.mypy]
+pretty = true
+show_error_codes = true
+
+# Exclude _files.py because mypy isn't smart enough to apply
+# the correct type narrowing and as this is an internal module
+# it's fine to just use Pyright.
+#
+# We also exclude our `tests` as mypy doesn't always infer
+# types correctly and Pyright will still catch any type errors.
+exclude = ['src/gradient/_files.py', '_dev/.*.py', 'tests/.*']
+
+strict_equality = true
+implicit_reexport = true
+check_untyped_defs = true
+no_implicit_optional = true
+
+warn_return_any = true
+warn_unreachable = true
+warn_unused_configs = true
+
+# Turn these options off as it could cause conflicts
+# with the Pyright options.
+warn_unused_ignores = false
+warn_redundant_casts = false
+
+disallow_any_generics = true
+disallow_untyped_defs = true
+disallow_untyped_calls = true
+disallow_subclassing_any = true
+disallow_incomplete_defs = true
+disallow_untyped_decorators = true
+cache_fine_grained = true
+
+# By default, mypy reports an error if you assign a value to the result
+# of a function call that doesn't return anything. We do this in our test
+# cases:
+# ```
+# result = ...
+# assert result is None
+# ```
+# Changing this codegen to make mypy happy would increase complexity
+# and would not be worth it.
+disable_error_code = "func-returns-value,overload-cannot-match"
+
+# https://github.com/python/mypy/issues/12162
+[[tool.mypy.overrides]]
+module = "black.files.*"
+ignore_errors = true
+ignore_missing_imports = true
+
+
[tool.ruff]
line-length = 120
output-format = "grouped"
-target-version = "py37"
+target-version = "py38"
[tool.ruff.format]
docstring-code-format = true
@@ -168,6 +216,8 @@ select = [
"B",
# remove unused imports
"F401",
+ # check for missing future annotations
+ "FA102",
# bare except statements
"E722",
# unused arguments
@@ -190,6 +240,8 @@ unfixable = [
"T203",
]
+extend-safe-fixes = ["FA102"]
+
[tool.ruff.lint.flake8-tidy-imports.banned-api]
"functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead"
@@ -198,10 +250,9 @@ length-sort = true
length-sort-straight = true
combine-as-imports = true
extra-standard-library = ["typing_extensions"]
-known-first-party = ["digitalocean_genai_sdk", "tests"]
+known-first-party = ["gradient", "tests"]
[tool.ruff.lint.per-file-ignores]
"bin/**.py" = ["T201", "T203"]
"scripts/**.py" = ["T201", "T203"]
-"tests/**.py" = ["T201", "T203"]
"examples/**.py" = ["T201", "T203"]
diff --git a/release-please-config.json b/release-please-config.json
new file mode 100644
index 00000000..0b0d1705
--- /dev/null
+++ b/release-please-config.json
@@ -0,0 +1,66 @@
+{
+ "packages": {
+ ".": {}
+ },
+ "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json",
+ "include-v-in-tag": true,
+ "include-component-in-tag": false,
+ "versioning": "prerelease",
+ "prerelease": true,
+ "bump-minor-pre-major": true,
+ "bump-patch-for-minor-pre-major": false,
+ "pull-request-header": "Automated Release PR",
+ "pull-request-title-pattern": "release: ${version}",
+ "changelog-sections": [
+ {
+ "type": "feat",
+ "section": "Features"
+ },
+ {
+ "type": "fix",
+ "section": "Bug Fixes"
+ },
+ {
+ "type": "perf",
+ "section": "Performance Improvements"
+ },
+ {
+ "type": "revert",
+ "section": "Reverts"
+ },
+ {
+ "type": "chore",
+ "section": "Chores"
+ },
+ {
+ "type": "docs",
+ "section": "Documentation"
+ },
+ {
+ "type": "style",
+ "section": "Styles"
+ },
+ {
+ "type": "refactor",
+ "section": "Refactors"
+ },
+ {
+ "type": "test",
+ "section": "Tests",
+ "hidden": true
+ },
+ {
+ "type": "build",
+ "section": "Build System"
+ },
+ {
+ "type": "ci",
+ "section": "Continuous Integration",
+ "hidden": true
+ }
+ ],
+ "release-type": "python",
+ "extra-files": [
+ "src/gradient/_version.py"
+ ]
+}
\ No newline at end of file
diff --git a/requirements-dev.lock b/requirements-dev.lock
index bf449af3..667e0dff 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -10,95 +10,140 @@
# universal: false
-e file:.
-annotated-types==0.6.0
+aiohappyeyeballs==2.6.1
+ # via aiohttp
+aiohttp==3.13.3
+ # via gradient
+ # via httpx-aiohttp
+aiosignal==1.4.0
+ # via aiohttp
+annotated-types==0.7.0
# via pydantic
-anyio==4.4.0
- # via digitalocean-genai-sdk
+anyio==4.12.1
+ # via gradient
# via httpx
-argcomplete==3.1.2
+argcomplete==3.6.3
# via nox
-certifi==2023.7.22
+async-timeout==5.0.1
+ # via aiohttp
+attrs==25.4.0
+ # via aiohttp
+ # via nox
+backports-asyncio-runner==1.2.0
+ # via pytest-asyncio
+certifi==2026.1.4
# via httpcore
# via httpx
-colorlog==6.7.0
+colorlog==6.10.1
+ # via nox
+dependency-groups==1.3.1
# via nox
-dirty-equals==0.6.0
-distlib==0.3.7
+dirty-equals==0.11
+distlib==0.4.0
# via virtualenv
-distro==1.8.0
- # via digitalocean-genai-sdk
-exceptiongroup==1.2.2
+distro==1.9.0
+ # via gradient
+exceptiongroup==1.3.1
# via anyio
# via pytest
-filelock==3.12.4
+execnet==2.1.2
+ # via pytest-xdist
+filelock==3.19.1
# via virtualenv
-h11==0.14.0
+frozenlist==1.8.0
+ # via aiohttp
+ # via aiosignal
+h11==0.16.0
# via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
# via httpx
httpx==0.28.1
- # via digitalocean-genai-sdk
+ # via gradient
+ # via httpx-aiohttp
# via respx
-idna==3.4
+httpx-aiohttp==0.1.12
+ # via gradient
+humanize==4.13.0
+ # via nox
+idna==3.11
# via anyio
# via httpx
-importlib-metadata==7.0.0
-iniconfig==2.0.0
+ # via yarl
+importlib-metadata==8.7.1
+iniconfig==2.1.0
# via pytest
markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
# via markdown-it-py
-mypy==1.14.1
-mypy-extensions==1.0.0
+multidict==6.7.0
+ # via aiohttp
+ # via yarl
+mypy==1.17.0
+mypy-extensions==1.1.0
# via mypy
-nest-asyncio==1.6.0
-nodeenv==1.8.0
+nodeenv==1.10.0
# via pyright
-nox==2023.4.22
-packaging==23.2
+nox==2025.11.12
+packaging==25.0
+ # via dependency-groups
# via nox
# via pytest
-platformdirs==3.11.0
+pathspec==1.0.3
+ # via mypy
+platformdirs==4.4.0
# via virtualenv
-pluggy==1.5.0
+pluggy==1.6.0
# via pytest
-pydantic==2.10.3
- # via digitalocean-genai-sdk
-pydantic-core==2.27.1
+propcache==0.4.1
+ # via aiohttp
+ # via yarl
+pydantic==2.12.5
+ # via gradient
+pydantic-core==2.41.5
# via pydantic
-pygments==2.18.0
+pygments==2.19.2
+ # via pytest
# via rich
pyright==1.1.399
-pytest==8.3.3
+pytest==8.4.2
# via pytest-asyncio
-pytest-asyncio==0.24.0
-python-dateutil==2.8.2
+ # via pytest-xdist
+pytest-asyncio==1.2.0
+pytest-xdist==3.8.0
+python-dateutil==2.9.0.post0
# via time-machine
-pytz==2023.3.post1
- # via dirty-equals
respx==0.22.0
-rich==13.7.1
-ruff==0.9.4
-setuptools==68.2.2
- # via nodeenv
-six==1.16.0
+rich==14.2.0
+ruff==0.14.13
+six==1.17.0
# via python-dateutil
-sniffio==1.3.0
- # via anyio
- # via digitalocean-genai-sdk
-time-machine==2.9.0
-tomli==2.0.2
+sniffio==1.3.1
+ # via gradient
+time-machine==2.19.0
+tomli==2.4.0
+ # via dependency-groups
# via mypy
+ # via nox
# via pytest
-typing-extensions==4.12.2
+typing-extensions==4.15.0
+ # via aiosignal
# via anyio
- # via digitalocean-genai-sdk
+ # via exceptiongroup
+ # via gradient
+ # via multidict
# via mypy
# via pydantic
# via pydantic-core
# via pyright
-virtualenv==20.24.5
+ # via pytest-asyncio
+ # via typing-inspection
+ # via virtualenv
+typing-inspection==0.4.2
+ # via pydantic
+virtualenv==20.36.1
# via nox
-zipp==3.17.0
+yarl==1.22.0
+ # via aiohttp
+zipp==3.23.0
# via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index e655776d..b48c65ea 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -10,36 +10,67 @@
# universal: false
-e file:.
-annotated-types==0.6.0
+aiohappyeyeballs==2.6.1
+ # via aiohttp
+aiohttp==3.13.3
+ # via gradient
+ # via httpx-aiohttp
+aiosignal==1.4.0
+ # via aiohttp
+annotated-types==0.7.0
# via pydantic
-anyio==4.4.0
- # via digitalocean-genai-sdk
+anyio==4.12.1
+ # via gradient
# via httpx
-certifi==2023.7.22
+async-timeout==5.0.1
+ # via aiohttp
+attrs==25.4.0
+ # via aiohttp
+certifi==2026.1.4
# via httpcore
# via httpx
-distro==1.8.0
- # via digitalocean-genai-sdk
-exceptiongroup==1.2.2
+distro==1.9.0
+ # via gradient
+exceptiongroup==1.3.1
# via anyio
-h11==0.14.0
+frozenlist==1.8.0
+ # via aiohttp
+ # via aiosignal
+h11==0.16.0
# via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
# via httpx
httpx==0.28.1
- # via digitalocean-genai-sdk
-idna==3.4
+ # via gradient
+ # via httpx-aiohttp
+httpx-aiohttp==0.1.12
+ # via gradient
+idna==3.11
# via anyio
# via httpx
-pydantic==2.10.3
- # via digitalocean-genai-sdk
-pydantic-core==2.27.1
+ # via yarl
+multidict==6.7.0
+ # via aiohttp
+ # via yarl
+propcache==0.4.1
+ # via aiohttp
+ # via yarl
+pydantic==2.12.5
+ # via gradient
+pydantic-core==2.41.5
# via pydantic
-sniffio==1.3.0
+sniffio==1.3.1
+ # via gradient
+typing-extensions==4.15.0
+ # via aiosignal
# via anyio
- # via digitalocean-genai-sdk
-typing-extensions==4.12.2
- # via anyio
- # via digitalocean-genai-sdk
+ # via exceptiongroup
+ # via gradient
+ # via multidict
# via pydantic
# via pydantic-core
+ # via typing-inspection
+typing-inspection==0.4.2
+ # via pydantic
+yarl==1.22.0
+ # via aiohttp
diff --git a/scripts/bootstrap b/scripts/bootstrap
index e84fe62c..b430fee3 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,10 +4,18 @@ set -e
cd "$(dirname "$0")/.."
-if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then
brew bundle check >/dev/null 2>&1 || {
- echo "==> Installing Homebrew dependencies…"
- brew bundle
+ echo -n "==> Install Homebrew dependencies? (y/N): "
+ read -r response
+ case "$response" in
+ [yY][eE][sS]|[yY])
+ brew bundle
+ ;;
+ *)
+ ;;
+ esac
+ echo
}
fi
diff --git a/scripts/lint b/scripts/lint
index 3f725f2d..bc51411f 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -4,8 +4,13 @@ set -e
cd "$(dirname "$0")/.."
-echo "==> Running lints"
-rye run lint
+if [ "$1" = "--fix" ]; then
+ echo "==> Running lints with --fix"
+ rye run fix:ruff
+else
+ echo "==> Running lints"
+ rye run lint
+fi
echo "==> Making sure it imports"
-rye run python -c 'import digitalocean_genai_sdk'
+rye run python -c 'import gradient'
diff --git a/scripts/mock b/scripts/mock
deleted file mode 100755
index d2814ae6..00000000
--- a/scripts/mock
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-cd "$(dirname "$0")/.."
-
-if [[ -n "$1" && "$1" != '--'* ]]; then
- URL="$1"
- shift
-else
- URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)"
-fi
-
-# Check if the URL is empty
-if [ -z "$URL" ]; then
- echo "Error: No OpenAPI spec path/url provided or found in .stats.yml"
- exit 1
-fi
-
-echo "==> Starting mock server with URL ${URL}"
-
-# Run prism mock on the given spec
-if [ "$1" == "--daemon" ]; then
- npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log &
-
- # Wait for server to come online
- echo -n "Waiting for server"
- while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
- echo -n "."
- sleep 0.1
- done
-
- if grep -q "✖ fatal" ".prism.log"; then
- cat .prism.log
- exit 1
- fi
-
- echo
-else
- npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL"
-fi
diff --git a/scripts/smoke b/scripts/smoke
new file mode 100755
index 00000000..02df51b9
--- /dev/null
+++ b/scripts/smoke
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+# Purpose: Run live smoke tests (sync + async) against real Gradient services.
+# These tests require valid credentials and are excluded from the normal test run.
+# Usage:
+# ./scripts/smoke # run all smoke tests
+# ./scripts/smoke -k agents # pass through extra pytest args
+#
+# If a .env file exists in the repo root it will be sourced automatically.
+
+set -euo pipefail
+
+cd "$(dirname "$0")/.."
+
+if [ -f .env ]; then
+ echo "==> Loading .env"
+ # export variables declared in .env
+ set -a
+ # shellcheck disable=SC1091
+ source .env
+ set +a
+fi
+
+required=(
+ DIGITALOCEAN_ACCESS_TOKEN
+ GRADIENT_MODEL_ACCESS_KEY
+ GRADIENT_AGENT_ACCESS_KEY
+ GRADIENT_AGENT_ENDPOINT
+)
+
+missing=()
+for var in "${required[@]}"; do
+ if [ -z "${!var:-}" ]; then
+ missing+=("$var")
+ fi
+done
+
+if [ ${#missing[@]} -ne 0 ]; then
+ echo "ERROR: Missing required environment variables for smoke tests:" >&2
+ printf ' %s\n' "${missing[@]}" >&2
+ echo >&2
+ echo "Provide them via your shell environment or a .env file (see .env.example)." >&2
+ exit 1
+fi
+
+echo "==> Running smoke tests (marker: smoke)"
+rye run pytest -m smoke "$@"
diff --git a/scripts/test b/scripts/test
index 2b878456..1012c870 100755
--- a/scripts/test
+++ b/scripts/test
@@ -4,58 +4,27 @@ set -e
cd "$(dirname "$0")/.."
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[0;33m'
-NC='\033[0m' # No Color
-
-function prism_is_running() {
- curl --silent "http://localhost:4010" >/dev/null 2>&1
-}
-
-kill_server_on_port() {
- pids=$(lsof -t -i tcp:"$1" || echo "")
- if [ "$pids" != "" ]; then
- kill "$pids"
- echo "Stopped $pids."
- fi
-}
-
-function is_overriding_api_base_url() {
- [ -n "$TEST_API_BASE_URL" ]
-}
-
-if ! is_overriding_api_base_url && ! prism_is_running ; then
- # When we exit this script, make sure to kill the background mock server process
- trap 'kill_server_on_port 4010' EXIT
-
- # Start the dev server
- ./scripts/mock --daemon
-fi
-if is_overriding_api_base_url ; then
- echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
- echo
-elif ! prism_is_running ; then
- echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
- echo -e "running against your OpenAPI spec."
- echo
- echo -e "To run the server, pass in the path or url of your OpenAPI"
- echo -e "spec to the prism command:"
- echo
- echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}"
- echo
-
- exit 1
-else
- echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
- echo
-fi
export DEFER_PYDANTIC_BUILD=false
-echo "==> Running tests"
-rye run pytest "$@"
+# Clear out any existing API keys to ensure tests run in a clean environment
+unset DIGITALOCEAN_ACCESS_TOKEN
+unset GRADIENT_MODEL_ACCESS_KEY
+unset GRADIENT_AGENT_ACCESS_KEY
+unset GRADIENT_AGENT_ENDPOINT
+
+echo "==> Running tests (excluding smoke tests by default)"
+if [ $# -eq 0 ]; then
+ # No explicit args provided; exclude smoke tests by default
+ rye run pytest -m 'not smoke'
+else
+ rye run pytest "$@"
+fi
-echo "==> Running Pydantic v1 tests"
-rye run nox -s test-pydantic-v1 -- "$@"
+echo "==> Running Pydantic v1 tests (excluding smoke tests by default)"
+if [ $# -eq 0 ]; then
+ rye run nox -s test-pydantic-v1 -- -m 'not smoke'
+else
+ rye run nox -s test-pydantic-v1 -- "$@"
+fi
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index c1019559..d93584b2 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -1,7 +1,9 @@
#!/usr/bin/env bash
set -exuo pipefail
-RESPONSE=$(curl -X POST "$URL" \
+FILENAME=$(basename dist/*.whl)
+
+RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \
-H "Authorization: Bearer $AUTH" \
-H "Content-Type: application/json")
@@ -12,13 +14,13 @@ if [[ "$SIGNED_URL" == "null" ]]; then
exit 1
fi
-UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \
- -H "Content-Type: application/gzip" \
- --data-binary @- "$SIGNED_URL" 2>&1)
+UPLOAD_RESPONSE=$(curl -v -X PUT \
+ -H "Content-Type: binary/octet-stream" \
+ --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1)
if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
- echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/digitalocean-genai-sdk-python/$SHA'\033[0m"
+ echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/gradient-python/$SHA/$FILENAME'\033[0m"
else
echo -e "\033[31mFailed to upload artifact.\033[0m"
exit 1
diff --git a/src/digitalocean_genai_sdk/__init__.py b/src/digitalocean_genai_sdk/__init__.py
deleted file mode 100644
index fc240d83..00000000
--- a/src/digitalocean_genai_sdk/__init__.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import typing as _t
-
-from . import types
-from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes
-from ._utils import file_from_path
-from ._client import (
- Client,
- Stream,
- Timeout,
- Transport,
- AsyncClient,
- AsyncStream,
- RequestOptions,
- DigitaloceanGenaiSDK,
- AsyncDigitaloceanGenaiSDK,
-)
-from ._models import BaseModel
-from ._version import __title__, __version__
-from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse
-from ._constants import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES, DEFAULT_CONNECTION_LIMITS
-from ._exceptions import (
- APIError,
- ConflictError,
- NotFoundError,
- APIStatusError,
- RateLimitError,
- APITimeoutError,
- BadRequestError,
- APIConnectionError,
- AuthenticationError,
- InternalServerError,
- PermissionDeniedError,
- UnprocessableEntityError,
- DigitaloceanGenaiSDKError,
- APIResponseValidationError,
-)
-from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
-from ._utils._logs import setup_logging as _setup_logging
-
-__all__ = [
- "types",
- "__version__",
- "__title__",
- "NoneType",
- "Transport",
- "ProxiesTypes",
- "NotGiven",
- "NOT_GIVEN",
- "Omit",
- "DigitaloceanGenaiSDKError",
- "APIError",
- "APIStatusError",
- "APITimeoutError",
- "APIConnectionError",
- "APIResponseValidationError",
- "BadRequestError",
- "AuthenticationError",
- "PermissionDeniedError",
- "NotFoundError",
- "ConflictError",
- "UnprocessableEntityError",
- "RateLimitError",
- "InternalServerError",
- "Timeout",
- "RequestOptions",
- "Client",
- "AsyncClient",
- "Stream",
- "AsyncStream",
- "DigitaloceanGenaiSDK",
- "AsyncDigitaloceanGenaiSDK",
- "file_from_path",
- "BaseModel",
- "DEFAULT_TIMEOUT",
- "DEFAULT_MAX_RETRIES",
- "DEFAULT_CONNECTION_LIMITS",
- "DefaultHttpxClient",
- "DefaultAsyncHttpxClient",
-]
-
-if not _t.TYPE_CHECKING:
- from ._utils._resources_proxy import resources as resources
-
-_setup_logging()
-
-# Update the __module__ attribute for exported symbols so that
-# error messages point to this module instead of the module
-# it was originally defined in, e.g.
-# digitalocean_genai_sdk._exceptions.NotFoundError -> digitalocean_genai_sdk.NotFoundError
-__locals = locals()
-for __name in __all__:
- if not __name.startswith("__"):
- try:
- __locals[__name].__module__ = "digitalocean_genai_sdk"
- except (TypeError, AttributeError):
- # Some of our exported symbols are builtins which we can't set attributes for.
- pass
diff --git a/src/digitalocean_genai_sdk/_client.py b/src/digitalocean_genai_sdk/_client.py
deleted file mode 100644
index 99580b5e..00000000
--- a/src/digitalocean_genai_sdk/_client.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, Union, Mapping
-from typing_extensions import Self, override
-
-import httpx
-
-from . import _exceptions
-from ._qs import Querystring
-from ._types import (
- NOT_GIVEN,
- Omit,
- Timeout,
- NotGiven,
- Transport,
- ProxiesTypes,
- RequestOptions,
-)
-from ._utils import is_given, get_async_library
-from ._version import __version__
-from .resources import (
- audio,
- files,
- images,
- models,
- batches,
- uploads,
- realtime,
- responses,
- assistants,
- embeddings,
- completions,
- moderations,
-)
-from ._streaming import Stream as Stream, AsyncStream as AsyncStream
-from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError
-from ._base_client import (
- DEFAULT_MAX_RETRIES,
- SyncAPIClient,
- AsyncAPIClient,
-)
-from .resources.chat import chat
-from .resources.threads import threads
-from .resources.fine_tuning import fine_tuning
-from .resources.organization import organization
-from .resources.vector_stores import vector_stores
-
-__all__ = [
- "Timeout",
- "Transport",
- "ProxiesTypes",
- "RequestOptions",
- "DigitaloceanGenaiSDK",
- "AsyncDigitaloceanGenaiSDK",
- "Client",
- "AsyncClient",
-]
-
-
-class DigitaloceanGenaiSDK(SyncAPIClient):
- assistants: assistants.AssistantsResource
- audio: audio.AudioResource
- batches: batches.BatchesResource
- chat: chat.ChatResource
- completions: completions.CompletionsResource
- embeddings: embeddings.EmbeddingsResource
- files: files.FilesResource
- fine_tuning: fine_tuning.FineTuningResource
- images: images.ImagesResource
- models: models.ModelsResource
- moderations: moderations.ModerationsResource
- organization: organization.OrganizationResource
- realtime: realtime.RealtimeResource
- responses: responses.ResponsesResource
- threads: threads.ThreadsResource
- uploads: uploads.UploadsResource
- vector_stores: vector_stores.VectorStoresResource
- with_raw_response: DigitaloceanGenaiSDKWithRawResponse
- with_streaming_response: DigitaloceanGenaiSDKWithStreamedResponse
-
- # client options
- api_key: str
-
- def __init__(
- self,
- *,
- api_key: str | None = None,
- base_url: str | httpx.URL | None = None,
- timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
- max_retries: int = DEFAULT_MAX_RETRIES,
- default_headers: Mapping[str, str] | None = None,
- default_query: Mapping[str, object] | None = None,
- # Configure a custom httpx client.
- # We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
- # See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
- http_client: httpx.Client | None = None,
- # Enable or disable schema validation for data returned by the API.
- # When enabled an error APIResponseValidationError is raised
- # if the API responds with invalid data for the expected schema.
- #
- # This parameter may be removed or changed in the future.
- # If you rely on this feature, please open a GitHub issue
- # outlining your use-case to help us decide if it should be
- # part of our public interface in the future.
- _strict_response_validation: bool = False,
- ) -> None:
- """Construct a new synchronous DigitaloceanGenaiSDK client instance.
-
- This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided.
- """
- if api_key is None:
- api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY")
- if api_key is None:
- raise DigitaloceanGenaiSDKError(
- "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable"
- )
- self.api_key = api_key
-
- if base_url is None:
- base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL")
- if base_url is None:
- base_url = f"https://api.example.com"
-
- super().__init__(
- version=__version__,
- base_url=base_url,
- max_retries=max_retries,
- timeout=timeout,
- http_client=http_client,
- custom_headers=default_headers,
- custom_query=default_query,
- _strict_response_validation=_strict_response_validation,
- )
-
- self.assistants = assistants.AssistantsResource(self)
- self.audio = audio.AudioResource(self)
- self.batches = batches.BatchesResource(self)
- self.chat = chat.ChatResource(self)
- self.completions = completions.CompletionsResource(self)
- self.embeddings = embeddings.EmbeddingsResource(self)
- self.files = files.FilesResource(self)
- self.fine_tuning = fine_tuning.FineTuningResource(self)
- self.images = images.ImagesResource(self)
- self.models = models.ModelsResource(self)
- self.moderations = moderations.ModerationsResource(self)
- self.organization = organization.OrganizationResource(self)
- self.realtime = realtime.RealtimeResource(self)
- self.responses = responses.ResponsesResource(self)
- self.threads = threads.ThreadsResource(self)
- self.uploads = uploads.UploadsResource(self)
- self.vector_stores = vector_stores.VectorStoresResource(self)
- self.with_raw_response = DigitaloceanGenaiSDKWithRawResponse(self)
- self.with_streaming_response = DigitaloceanGenaiSDKWithStreamedResponse(self)
-
- @property
- @override
- def qs(self) -> Querystring:
- return Querystring(array_format="comma")
-
- @property
- @override
- def auth_headers(self) -> dict[str, str]:
- api_key = self.api_key
- return {"Authorization": f"Bearer {api_key}"}
-
- @property
- @override
- def default_headers(self) -> dict[str, str | Omit]:
- return {
- **super().default_headers,
- "X-Stainless-Async": "false",
- **self._custom_headers,
- }
-
- def copy(
- self,
- *,
- api_key: str | None = None,
- base_url: str | httpx.URL | None = None,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
- http_client: httpx.Client | None = None,
- max_retries: int | NotGiven = NOT_GIVEN,
- default_headers: Mapping[str, str] | None = None,
- set_default_headers: Mapping[str, str] | None = None,
- default_query: Mapping[str, object] | None = None,
- set_default_query: Mapping[str, object] | None = None,
- _extra_kwargs: Mapping[str, Any] = {},
- ) -> Self:
- """
- Create a new client instance re-using the same options given to the current client with optional overriding.
- """
- if default_headers is not None and set_default_headers is not None:
- raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
-
- if default_query is not None and set_default_query is not None:
- raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
-
- headers = self._custom_headers
- if default_headers is not None:
- headers = {**headers, **default_headers}
- elif set_default_headers is not None:
- headers = set_default_headers
-
- params = self._custom_query
- if default_query is not None:
- params = {**params, **default_query}
- elif set_default_query is not None:
- params = set_default_query
-
- http_client = http_client or self._client
- return self.__class__(
- api_key=api_key or self.api_key,
- base_url=base_url or self.base_url,
- timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
- http_client=http_client,
- max_retries=max_retries if is_given(max_retries) else self.max_retries,
- default_headers=headers,
- default_query=params,
- **_extra_kwargs,
- )
-
- # Alias for `copy` for nicer inline usage, e.g.
- # client.with_options(timeout=10).foo.create(...)
- with_options = copy
-
- @override
- def _make_status_error(
- self,
- err_msg: str,
- *,
- body: object,
- response: httpx.Response,
- ) -> APIStatusError:
- if response.status_code == 400:
- return _exceptions.BadRequestError(err_msg, response=response, body=body)
-
- if response.status_code == 401:
- return _exceptions.AuthenticationError(err_msg, response=response, body=body)
-
- if response.status_code == 403:
- return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
-
- if response.status_code == 404:
- return _exceptions.NotFoundError(err_msg, response=response, body=body)
-
- if response.status_code == 409:
- return _exceptions.ConflictError(err_msg, response=response, body=body)
-
- if response.status_code == 422:
- return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
-
- if response.status_code == 429:
- return _exceptions.RateLimitError(err_msg, response=response, body=body)
-
- if response.status_code >= 500:
- return _exceptions.InternalServerError(err_msg, response=response, body=body)
- return APIStatusError(err_msg, response=response, body=body)
-
-
-class AsyncDigitaloceanGenaiSDK(AsyncAPIClient):
- assistants: assistants.AsyncAssistantsResource
- audio: audio.AsyncAudioResource
- batches: batches.AsyncBatchesResource
- chat: chat.AsyncChatResource
- completions: completions.AsyncCompletionsResource
- embeddings: embeddings.AsyncEmbeddingsResource
- files: files.AsyncFilesResource
- fine_tuning: fine_tuning.AsyncFineTuningResource
- images: images.AsyncImagesResource
- models: models.AsyncModelsResource
- moderations: moderations.AsyncModerationsResource
- organization: organization.AsyncOrganizationResource
- realtime: realtime.AsyncRealtimeResource
- responses: responses.AsyncResponsesResource
- threads: threads.AsyncThreadsResource
- uploads: uploads.AsyncUploadsResource
- vector_stores: vector_stores.AsyncVectorStoresResource
- with_raw_response: AsyncDigitaloceanGenaiSDKWithRawResponse
- with_streaming_response: AsyncDigitaloceanGenaiSDKWithStreamedResponse
-
- # client options
- api_key: str
-
- def __init__(
- self,
- *,
- api_key: str | None = None,
- base_url: str | httpx.URL | None = None,
- timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
- max_retries: int = DEFAULT_MAX_RETRIES,
- default_headers: Mapping[str, str] | None = None,
- default_query: Mapping[str, object] | None = None,
- # Configure a custom httpx client.
- # We provide a `DefaultAsyncHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
- # See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details.
- http_client: httpx.AsyncClient | None = None,
- # Enable or disable schema validation for data returned by the API.
- # When enabled an error APIResponseValidationError is raised
- # if the API responds with invalid data for the expected schema.
- #
- # This parameter may be removed or changed in the future.
- # If you rely on this feature, please open a GitHub issue
- # outlining your use-case to help us decide if it should be
- # part of our public interface in the future.
- _strict_response_validation: bool = False,
- ) -> None:
- """Construct a new async AsyncDigitaloceanGenaiSDK client instance.
-
- This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided.
- """
- if api_key is None:
- api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY")
- if api_key is None:
- raise DigitaloceanGenaiSDKError(
- "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable"
- )
- self.api_key = api_key
-
- if base_url is None:
- base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL")
- if base_url is None:
- base_url = f"https://api.example.com"
-
- super().__init__(
- version=__version__,
- base_url=base_url,
- max_retries=max_retries,
- timeout=timeout,
- http_client=http_client,
- custom_headers=default_headers,
- custom_query=default_query,
- _strict_response_validation=_strict_response_validation,
- )
-
- self.assistants = assistants.AsyncAssistantsResource(self)
- self.audio = audio.AsyncAudioResource(self)
- self.batches = batches.AsyncBatchesResource(self)
- self.chat = chat.AsyncChatResource(self)
- self.completions = completions.AsyncCompletionsResource(self)
- self.embeddings = embeddings.AsyncEmbeddingsResource(self)
- self.files = files.AsyncFilesResource(self)
- self.fine_tuning = fine_tuning.AsyncFineTuningResource(self)
- self.images = images.AsyncImagesResource(self)
- self.models = models.AsyncModelsResource(self)
- self.moderations = moderations.AsyncModerationsResource(self)
- self.organization = organization.AsyncOrganizationResource(self)
- self.realtime = realtime.AsyncRealtimeResource(self)
- self.responses = responses.AsyncResponsesResource(self)
- self.threads = threads.AsyncThreadsResource(self)
- self.uploads = uploads.AsyncUploadsResource(self)
- self.vector_stores = vector_stores.AsyncVectorStoresResource(self)
- self.with_raw_response = AsyncDigitaloceanGenaiSDKWithRawResponse(self)
- self.with_streaming_response = AsyncDigitaloceanGenaiSDKWithStreamedResponse(self)
-
- @property
- @override
- def qs(self) -> Querystring:
- return Querystring(array_format="comma")
-
- @property
- @override
- def auth_headers(self) -> dict[str, str]:
- api_key = self.api_key
- return {"Authorization": f"Bearer {api_key}"}
-
- @property
- @override
- def default_headers(self) -> dict[str, str | Omit]:
- return {
- **super().default_headers,
- "X-Stainless-Async": f"async:{get_async_library()}",
- **self._custom_headers,
- }
-
- def copy(
- self,
- *,
- api_key: str | None = None,
- base_url: str | httpx.URL | None = None,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
- http_client: httpx.AsyncClient | None = None,
- max_retries: int | NotGiven = NOT_GIVEN,
- default_headers: Mapping[str, str] | None = None,
- set_default_headers: Mapping[str, str] | None = None,
- default_query: Mapping[str, object] | None = None,
- set_default_query: Mapping[str, object] | None = None,
- _extra_kwargs: Mapping[str, Any] = {},
- ) -> Self:
- """
- Create a new client instance re-using the same options given to the current client with optional overriding.
- """
- if default_headers is not None and set_default_headers is not None:
- raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
-
- if default_query is not None and set_default_query is not None:
- raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
-
- headers = self._custom_headers
- if default_headers is not None:
- headers = {**headers, **default_headers}
- elif set_default_headers is not None:
- headers = set_default_headers
-
- params = self._custom_query
- if default_query is not None:
- params = {**params, **default_query}
- elif set_default_query is not None:
- params = set_default_query
-
- http_client = http_client or self._client
- return self.__class__(
- api_key=api_key or self.api_key,
- base_url=base_url or self.base_url,
- timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
- http_client=http_client,
- max_retries=max_retries if is_given(max_retries) else self.max_retries,
- default_headers=headers,
- default_query=params,
- **_extra_kwargs,
- )
-
- # Alias for `copy` for nicer inline usage, e.g.
- # client.with_options(timeout=10).foo.create(...)
- with_options = copy
-
- @override
- def _make_status_error(
- self,
- err_msg: str,
- *,
- body: object,
- response: httpx.Response,
- ) -> APIStatusError:
- if response.status_code == 400:
- return _exceptions.BadRequestError(err_msg, response=response, body=body)
-
- if response.status_code == 401:
- return _exceptions.AuthenticationError(err_msg, response=response, body=body)
-
- if response.status_code == 403:
- return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
-
- if response.status_code == 404:
- return _exceptions.NotFoundError(err_msg, response=response, body=body)
-
- if response.status_code == 409:
- return _exceptions.ConflictError(err_msg, response=response, body=body)
-
- if response.status_code == 422:
- return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
-
- if response.status_code == 429:
- return _exceptions.RateLimitError(err_msg, response=response, body=body)
-
- if response.status_code >= 500:
- return _exceptions.InternalServerError(err_msg, response=response, body=body)
- return APIStatusError(err_msg, response=response, body=body)
-
-
-class DigitaloceanGenaiSDKWithRawResponse:
- def __init__(self, client: DigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AssistantsResourceWithRawResponse(client.assistants)
- self.audio = audio.AudioResourceWithRawResponse(client.audio)
- self.batches = batches.BatchesResourceWithRawResponse(client.batches)
- self.chat = chat.ChatResourceWithRawResponse(client.chat)
- self.completions = completions.CompletionsResourceWithRawResponse(client.completions)
- self.embeddings = embeddings.EmbeddingsResourceWithRawResponse(client.embeddings)
- self.files = files.FilesResourceWithRawResponse(client.files)
- self.fine_tuning = fine_tuning.FineTuningResourceWithRawResponse(client.fine_tuning)
- self.images = images.ImagesResourceWithRawResponse(client.images)
- self.models = models.ModelsResourceWithRawResponse(client.models)
- self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations)
- self.organization = organization.OrganizationResourceWithRawResponse(client.organization)
- self.realtime = realtime.RealtimeResourceWithRawResponse(client.realtime)
- self.responses = responses.ResponsesResourceWithRawResponse(client.responses)
- self.threads = threads.ThreadsResourceWithRawResponse(client.threads)
- self.uploads = uploads.UploadsResourceWithRawResponse(client.uploads)
- self.vector_stores = vector_stores.VectorStoresResourceWithRawResponse(client.vector_stores)
-
-
-class AsyncDigitaloceanGenaiSDKWithRawResponse:
- def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AsyncAssistantsResourceWithRawResponse(client.assistants)
- self.audio = audio.AsyncAudioResourceWithRawResponse(client.audio)
- self.batches = batches.AsyncBatchesResourceWithRawResponse(client.batches)
- self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
- self.completions = completions.AsyncCompletionsResourceWithRawResponse(client.completions)
- self.embeddings = embeddings.AsyncEmbeddingsResourceWithRawResponse(client.embeddings)
- self.files = files.AsyncFilesResourceWithRawResponse(client.files)
- self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithRawResponse(client.fine_tuning)
- self.images = images.AsyncImagesResourceWithRawResponse(client.images)
- self.models = models.AsyncModelsResourceWithRawResponse(client.models)
- self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations)
- self.organization = organization.AsyncOrganizationResourceWithRawResponse(client.organization)
- self.realtime = realtime.AsyncRealtimeResourceWithRawResponse(client.realtime)
- self.responses = responses.AsyncResponsesResourceWithRawResponse(client.responses)
- self.threads = threads.AsyncThreadsResourceWithRawResponse(client.threads)
- self.uploads = uploads.AsyncUploadsResourceWithRawResponse(client.uploads)
- self.vector_stores = vector_stores.AsyncVectorStoresResourceWithRawResponse(client.vector_stores)
-
-
-class DigitaloceanGenaiSDKWithStreamedResponse:
- def __init__(self, client: DigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AssistantsResourceWithStreamingResponse(client.assistants)
- self.audio = audio.AudioResourceWithStreamingResponse(client.audio)
- self.batches = batches.BatchesResourceWithStreamingResponse(client.batches)
- self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
- self.completions = completions.CompletionsResourceWithStreamingResponse(client.completions)
- self.embeddings = embeddings.EmbeddingsResourceWithStreamingResponse(client.embeddings)
- self.files = files.FilesResourceWithStreamingResponse(client.files)
- self.fine_tuning = fine_tuning.FineTuningResourceWithStreamingResponse(client.fine_tuning)
- self.images = images.ImagesResourceWithStreamingResponse(client.images)
- self.models = models.ModelsResourceWithStreamingResponse(client.models)
- self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations)
- self.organization = organization.OrganizationResourceWithStreamingResponse(client.organization)
- self.realtime = realtime.RealtimeResourceWithStreamingResponse(client.realtime)
- self.responses = responses.ResponsesResourceWithStreamingResponse(client.responses)
- self.threads = threads.ThreadsResourceWithStreamingResponse(client.threads)
- self.uploads = uploads.UploadsResourceWithStreamingResponse(client.uploads)
- self.vector_stores = vector_stores.VectorStoresResourceWithStreamingResponse(client.vector_stores)
-
-
-class AsyncDigitaloceanGenaiSDKWithStreamedResponse:
- def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None:
- self.assistants = assistants.AsyncAssistantsResourceWithStreamingResponse(client.assistants)
- self.audio = audio.AsyncAudioResourceWithStreamingResponse(client.audio)
- self.batches = batches.AsyncBatchesResourceWithStreamingResponse(client.batches)
- self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
- self.completions = completions.AsyncCompletionsResourceWithStreamingResponse(client.completions)
- self.embeddings = embeddings.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings)
- self.files = files.AsyncFilesResourceWithStreamingResponse(client.files)
- self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithStreamingResponse(client.fine_tuning)
- self.images = images.AsyncImagesResourceWithStreamingResponse(client.images)
- self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
- self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations)
- self.organization = organization.AsyncOrganizationResourceWithStreamingResponse(client.organization)
- self.realtime = realtime.AsyncRealtimeResourceWithStreamingResponse(client.realtime)
- self.responses = responses.AsyncResponsesResourceWithStreamingResponse(client.responses)
- self.threads = threads.AsyncThreadsResourceWithStreamingResponse(client.threads)
- self.uploads = uploads.AsyncUploadsResourceWithStreamingResponse(client.uploads)
- self.vector_stores = vector_stores.AsyncVectorStoresResourceWithStreamingResponse(client.vector_stores)
-
-
-Client = DigitaloceanGenaiSDK
-
-AsyncClient = AsyncDigitaloceanGenaiSDK
diff --git a/src/digitalocean_genai_sdk/_compat.py b/src/digitalocean_genai_sdk/_compat.py
deleted file mode 100644
index 92d9ee61..00000000
--- a/src/digitalocean_genai_sdk/_compat.py
+++ /dev/null
@@ -1,219 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
-from datetime import date, datetime
-from typing_extensions import Self, Literal
-
-import pydantic
-from pydantic.fields import FieldInfo
-
-from ._types import IncEx, StrBytesIntFloat
-
-_T = TypeVar("_T")
-_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
-
-# --------------- Pydantic v2 compatibility ---------------
-
-# Pyright incorrectly reports some of our functions as overriding a method when they don't
-# pyright: reportIncompatibleMethodOverride=false
-
-PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
-
-# v1 re-exports
-if TYPE_CHECKING:
-
- def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001
- ...
-
- def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: # noqa: ARG001
- ...
-
- def get_args(t: type[Any]) -> tuple[Any, ...]: # noqa: ARG001
- ...
-
- def is_union(tp: type[Any] | None) -> bool: # noqa: ARG001
- ...
-
- def get_origin(t: type[Any]) -> type[Any] | None: # noqa: ARG001
- ...
-
- def is_literal_type(type_: type[Any]) -> bool: # noqa: ARG001
- ...
-
- def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001
- ...
-
-else:
- if PYDANTIC_V2:
- from pydantic.v1.typing import (
- get_args as get_args,
- is_union as is_union,
- get_origin as get_origin,
- is_typeddict as is_typeddict,
- is_literal_type as is_literal_type,
- )
- from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
- else:
- from pydantic.typing import (
- get_args as get_args,
- is_union as is_union,
- get_origin as get_origin,
- is_typeddict as is_typeddict,
- is_literal_type as is_literal_type,
- )
- from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
-
-
-# refactored config
-if TYPE_CHECKING:
- from pydantic import ConfigDict as ConfigDict
-else:
- if PYDANTIC_V2:
- from pydantic import ConfigDict
- else:
- # TODO: provide an error message here?
- ConfigDict = None
-
-
-# renamed methods / properties
-def parse_obj(model: type[_ModelT], value: object) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_validate(value)
- else:
- return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
-
-
-def field_is_required(field: FieldInfo) -> bool:
- if PYDANTIC_V2:
- return field.is_required()
- return field.required # type: ignore
-
-
-def field_get_default(field: FieldInfo) -> Any:
- value = field.get_default()
- if PYDANTIC_V2:
- from pydantic_core import PydanticUndefined
-
- if value == PydanticUndefined:
- return None
- return value
- return value
-
-
-def field_outer_type(field: FieldInfo) -> Any:
- if PYDANTIC_V2:
- return field.annotation
- return field.outer_type_ # type: ignore
-
-
-def get_model_config(model: type[pydantic.BaseModel]) -> Any:
- if PYDANTIC_V2:
- return model.model_config
- return model.__config__ # type: ignore
-
-
-def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
- if PYDANTIC_V2:
- return model.model_fields
- return model.__fields__ # type: ignore
-
-
-def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_copy(deep=deep)
- return model.copy(deep=deep) # type: ignore
-
-
-def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
- if PYDANTIC_V2:
- return model.model_dump_json(indent=indent)
- return model.json(indent=indent) # type: ignore
-
-
-def model_dump(
- model: pydantic.BaseModel,
- *,
- exclude: IncEx | None = None,
- exclude_unset: bool = False,
- exclude_defaults: bool = False,
- warnings: bool = True,
- mode: Literal["json", "python"] = "python",
-) -> dict[str, Any]:
- if PYDANTIC_V2 or hasattr(model, "model_dump"):
- return model.model_dump(
- mode=mode,
- exclude=exclude,
- exclude_unset=exclude_unset,
- exclude_defaults=exclude_defaults,
- # warnings are not supported in Pydantic v1
- warnings=warnings if PYDANTIC_V2 else True,
- )
- return cast(
- "dict[str, Any]",
- model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- exclude=exclude,
- exclude_unset=exclude_unset,
- exclude_defaults=exclude_defaults,
- ),
- )
-
-
-def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_validate(data)
- return model.parse_obj(data) # pyright: ignore[reportDeprecated]
-
-
-# generic models
-if TYPE_CHECKING:
-
- class GenericModel(pydantic.BaseModel): ...
-
-else:
- if PYDANTIC_V2:
- # there no longer needs to be a distinction in v2 but
- # we still have to create our own subclass to avoid
- # inconsistent MRO ordering errors
- class GenericModel(pydantic.BaseModel): ...
-
- else:
- import pydantic.generics
-
- class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
-
-
-# cached properties
-if TYPE_CHECKING:
- cached_property = property
-
- # we define a separate type (copied from typeshed)
- # that represents that `cached_property` is `set`able
- # at runtime, which differs from `@property`.
- #
- # this is a separate type as editors likely special case
- # `@property` and we don't want to cause issues just to have
- # more helpful internal types.
-
- class typed_cached_property(Generic[_T]):
- func: Callable[[Any], _T]
- attrname: str | None
-
- def __init__(self, func: Callable[[Any], _T]) -> None: ...
-
- @overload
- def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
-
- @overload
- def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
-
- def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
- raise NotImplementedError()
-
- def __set_name__(self, owner: type[Any], name: str) -> None: ...
-
- # __set__ is not defined at runtime, but @cached_property is designed to be settable
- def __set__(self, instance: object, value: _T) -> None: ...
-else:
- from functools import cached_property as cached_property
-
- typed_cached_property = cached_property
diff --git a/src/digitalocean_genai_sdk/_exceptions.py b/src/digitalocean_genai_sdk/_exceptions.py
deleted file mode 100644
index 755e166e..00000000
--- a/src/digitalocean_genai_sdk/_exceptions.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal
-
-import httpx
-
-__all__ = [
- "BadRequestError",
- "AuthenticationError",
- "PermissionDeniedError",
- "NotFoundError",
- "ConflictError",
- "UnprocessableEntityError",
- "RateLimitError",
- "InternalServerError",
-]
-
-
-class DigitaloceanGenaiSDKError(Exception):
- pass
-
-
-class APIError(DigitaloceanGenaiSDKError):
- message: str
- request: httpx.Request
-
- body: object | None
- """The API response body.
-
- If the API responded with a valid JSON structure then this property will be the
- decoded result.
-
- If it isn't a valid JSON structure then this will be the raw response.
-
- If there was no response associated with this error then it will be `None`.
- """
-
- def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None: # noqa: ARG002
- super().__init__(message)
- self.request = request
- self.message = message
- self.body = body
-
-
-class APIResponseValidationError(APIError):
- response: httpx.Response
- status_code: int
-
- def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None:
- super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body)
- self.response = response
- self.status_code = response.status_code
-
-
-class APIStatusError(APIError):
- """Raised when an API response has a status code of 4xx or 5xx."""
-
- response: httpx.Response
- status_code: int
-
- def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
- super().__init__(message, response.request, body=body)
- self.response = response
- self.status_code = response.status_code
-
-
-class APIConnectionError(APIError):
- def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None:
- super().__init__(message, request, body=None)
-
-
-class APITimeoutError(APIConnectionError):
- def __init__(self, request: httpx.Request) -> None:
- super().__init__(message="Request timed out.", request=request)
-
-
-class BadRequestError(APIStatusError):
- status_code: Literal[400] = 400 # pyright: ignore[reportIncompatibleVariableOverride]
-
-
-class AuthenticationError(APIStatusError):
- status_code: Literal[401] = 401 # pyright: ignore[reportIncompatibleVariableOverride]
-
-
-class PermissionDeniedError(APIStatusError):
- status_code: Literal[403] = 403 # pyright: ignore[reportIncompatibleVariableOverride]
-
-
-class NotFoundError(APIStatusError):
- status_code: Literal[404] = 404 # pyright: ignore[reportIncompatibleVariableOverride]
-
-
-class ConflictError(APIStatusError):
- status_code: Literal[409] = 409 # pyright: ignore[reportIncompatibleVariableOverride]
-
-
-class UnprocessableEntityError(APIStatusError):
- status_code: Literal[422] = 422 # pyright: ignore[reportIncompatibleVariableOverride]
-
-
-class RateLimitError(APIStatusError):
- status_code: Literal[429] = 429 # pyright: ignore[reportIncompatibleVariableOverride]
-
-
-class InternalServerError(APIStatusError):
- pass
diff --git a/src/digitalocean_genai_sdk/_types.py b/src/digitalocean_genai_sdk/_types.py
deleted file mode 100644
index b2bfbbec..00000000
--- a/src/digitalocean_genai_sdk/_types.py
+++ /dev/null
@@ -1,217 +0,0 @@
-from __future__ import annotations
-
-from os import PathLike
-from typing import (
- IO,
- TYPE_CHECKING,
- Any,
- Dict,
- List,
- Type,
- Tuple,
- Union,
- Mapping,
- TypeVar,
- Callable,
- Optional,
- Sequence,
-)
-from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable
-
-import httpx
-import pydantic
-from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport
-
-if TYPE_CHECKING:
- from ._models import BaseModel
- from ._response import APIResponse, AsyncAPIResponse
-
-Transport = BaseTransport
-AsyncTransport = AsyncBaseTransport
-Query = Mapping[str, object]
-Body = object
-AnyMapping = Mapping[str, object]
-ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
-_T = TypeVar("_T")
-
-
-# Approximates httpx internal ProxiesTypes and RequestFiles types
-# while adding support for `PathLike` instances
-ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]]
-ProxiesTypes = Union[str, Proxy, ProxiesDict]
-if TYPE_CHECKING:
- Base64FileInput = Union[IO[bytes], PathLike[str]]
- FileContent = Union[IO[bytes], bytes, PathLike[str]]
-else:
- Base64FileInput = Union[IO[bytes], PathLike]
- FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8.
-FileTypes = Union[
- # file (or bytes)
- FileContent,
- # (filename, file (or bytes))
- Tuple[Optional[str], FileContent],
- # (filename, file (or bytes), content_type)
- Tuple[Optional[str], FileContent, Optional[str]],
- # (filename, file (or bytes), content_type, headers)
- Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],
-]
-RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
-
-# duplicate of the above but without our custom file support
-HttpxFileContent = Union[IO[bytes], bytes]
-HttpxFileTypes = Union[
- # file (or bytes)
- HttpxFileContent,
- # (filename, file (or bytes))
- Tuple[Optional[str], HttpxFileContent],
- # (filename, file (or bytes), content_type)
- Tuple[Optional[str], HttpxFileContent, Optional[str]],
- # (filename, file (or bytes), content_type, headers)
- Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]],
-]
-HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]]
-
-# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT
-# where ResponseT includes `None`. In order to support directly
-# passing `None`, overloads would have to be defined for every
-# method that uses `ResponseT` which would lead to an unacceptable
-# amount of code duplication and make it unreadable. See _base_client.py
-# for example usage.
-#
-# This unfortunately means that you will either have
-# to import this type and pass it explicitly:
-#
-# from digitalocean_genai_sdk import NoneType
-# client.get('/foo', cast_to=NoneType)
-#
-# or build it yourself:
-#
-# client.get('/foo', cast_to=type(None))
-if TYPE_CHECKING:
- NoneType: Type[None]
-else:
- NoneType = type(None)
-
-
-class RequestOptions(TypedDict, total=False):
- headers: Headers
- max_retries: int
- timeout: float | Timeout | None
- params: Query
- extra_json: AnyMapping
- idempotency_key: str
-
-
-# Sentinel class used until PEP 0661 is accepted
-class NotGiven:
- """
- A sentinel singleton class used to distinguish omitted keyword arguments
- from those passed in with the value None (which may have different behavior).
-
- For example:
-
- ```py
- def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
-
-
- get(timeout=1) # 1s timeout
- get(timeout=None) # No timeout
- get() # Default timeout behavior, which may not be statically known at the method definition.
- ```
- """
-
- def __bool__(self) -> Literal[False]:
- return False
-
- @override
- def __repr__(self) -> str:
- return "NOT_GIVEN"
-
-
-NotGivenOr = Union[_T, NotGiven]
-NOT_GIVEN = NotGiven()
-
-
-class Omit:
- """In certain situations you need to be able to represent a case where a default value has
- to be explicitly removed and `None` is not an appropriate substitute, for example:
-
- ```py
- # as the default `Content-Type` header is `application/json` that will be sent
- client.post("/upload/files", files={"file": b"my raw file content"})
-
- # you can't explicitly override the header as it has to be dynamically generated
- # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
- client.post(..., headers={"Content-Type": "multipart/form-data"})
-
- # instead you can remove the default `application/json` header by passing Omit
- client.post(..., headers={"Content-Type": Omit()})
- ```
- """
-
- def __bool__(self) -> Literal[False]:
- return False
-
-
-@runtime_checkable
-class ModelBuilderProtocol(Protocol):
- @classmethod
- def build(
- cls: type[_T],
- *,
- response: Response,
- data: object,
- ) -> _T: ...
-
-
-Headers = Mapping[str, Union[str, Omit]]
-
-
-class HeadersLikeProtocol(Protocol):
- def get(self, __key: str) -> str | None: ...
-
-
-HeadersLike = Union[Headers, HeadersLikeProtocol]
-
-ResponseT = TypeVar(
- "ResponseT",
- bound=Union[
- object,
- str,
- None,
- "BaseModel",
- List[Any],
- Dict[str, Any],
- Response,
- ModelBuilderProtocol,
- "APIResponse[Any]",
- "AsyncAPIResponse[Any]",
- ],
-)
-
-StrBytesIntFloat = Union[str, bytes, int, float]
-
-# Note: copied from Pydantic
-# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79
-IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]]
-
-PostParser = Callable[[Any], Any]
-
-
-@runtime_checkable
-class InheritsGeneric(Protocol):
- """Represents a type that has inherited from `Generic`
-
- The `__orig_bases__` property can be used to determine the resolved
- type variable for a given base class.
- """
-
- __orig_bases__: tuple[_GenericAlias]
-
-
-class _GenericAlias(Protocol):
- __origin__: type[object]
-
-
-class HttpxSendArgs(TypedDict, total=False):
- auth: httpx.Auth
diff --git a/src/digitalocean_genai_sdk/_utils/__init__.py b/src/digitalocean_genai_sdk/_utils/__init__.py
deleted file mode 100644
index d4fda26f..00000000
--- a/src/digitalocean_genai_sdk/_utils/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from ._sync import asyncify as asyncify
-from ._proxy import LazyProxy as LazyProxy
-from ._utils import (
- flatten as flatten,
- is_dict as is_dict,
- is_list as is_list,
- is_given as is_given,
- is_tuple as is_tuple,
- json_safe as json_safe,
- lru_cache as lru_cache,
- is_mapping as is_mapping,
- is_tuple_t as is_tuple_t,
- parse_date as parse_date,
- is_iterable as is_iterable,
- is_sequence as is_sequence,
- coerce_float as coerce_float,
- is_mapping_t as is_mapping_t,
- removeprefix as removeprefix,
- removesuffix as removesuffix,
- extract_files as extract_files,
- is_sequence_t as is_sequence_t,
- required_args as required_args,
- coerce_boolean as coerce_boolean,
- coerce_integer as coerce_integer,
- file_from_path as file_from_path,
- parse_datetime as parse_datetime,
- strip_not_given as strip_not_given,
- deepcopy_minimal as deepcopy_minimal,
- get_async_library as get_async_library,
- maybe_coerce_float as maybe_coerce_float,
- get_required_header as get_required_header,
- maybe_coerce_boolean as maybe_coerce_boolean,
- maybe_coerce_integer as maybe_coerce_integer,
-)
-from ._typing import (
- is_list_type as is_list_type,
- is_union_type as is_union_type,
- extract_type_arg as extract_type_arg,
- is_iterable_type as is_iterable_type,
- is_required_type as is_required_type,
- is_annotated_type as is_annotated_type,
- is_type_alias_type as is_type_alias_type,
- strip_annotated_type as strip_annotated_type,
- extract_type_var_from_base as extract_type_var_from_base,
-)
-from ._streams import consume_sync_iterator as consume_sync_iterator, consume_async_iterator as consume_async_iterator
-from ._transform import (
- PropertyInfo as PropertyInfo,
- transform as transform,
- async_transform as async_transform,
- maybe_transform as maybe_transform,
- async_maybe_transform as async_maybe_transform,
-)
-from ._reflection import (
- function_has_argument as function_has_argument,
- assert_signatures_in_sync as assert_signatures_in_sync,
-)
diff --git a/src/digitalocean_genai_sdk/_utils/_logs.py b/src/digitalocean_genai_sdk/_utils/_logs.py
deleted file mode 100644
index e0c1fee5..00000000
--- a/src/digitalocean_genai_sdk/_utils/_logs.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import logging
-
-logger: logging.Logger = logging.getLogger("digitalocean_genai_sdk")
-httpx_logger: logging.Logger = logging.getLogger("httpx")
-
-
-def _basic_config() -> None:
- # e.g. [2023-10-05 14:12:26 - digitalocean_genai_sdk._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
- logging.basicConfig(
- format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S",
- )
-
-
-def setup_logging() -> None:
- env = os.environ.get("DIGITALOCEAN_GENAI_SDK_LOG")
- if env == "debug":
- _basic_config()
- logger.setLevel(logging.DEBUG)
- httpx_logger.setLevel(logging.DEBUG)
- elif env == "info":
- _basic_config()
- logger.setLevel(logging.INFO)
- httpx_logger.setLevel(logging.INFO)
diff --git a/src/digitalocean_genai_sdk/_utils/_resources_proxy.py b/src/digitalocean_genai_sdk/_utils/_resources_proxy.py
deleted file mode 100644
index 4ebaf7a4..00000000
--- a/src/digitalocean_genai_sdk/_utils/_resources_proxy.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-from typing_extensions import override
-
-from ._proxy import LazyProxy
-
-
-class ResourcesProxy(LazyProxy[Any]):
- """A proxy for the `digitalocean_genai_sdk.resources` module.
-
- This is used so that we can lazily import `digitalocean_genai_sdk.resources` only when
- needed *and* so that users can just import `digitalocean_genai_sdk` and reference `digitalocean_genai_sdk.resources`
- """
-
- @override
- def __load__(self) -> Any:
- import importlib
-
- mod = importlib.import_module("digitalocean_genai_sdk.resources")
- return mod
-
-
-resources = ResourcesProxy().__as_proxied__()
diff --git a/src/digitalocean_genai_sdk/_utils/_sync.py b/src/digitalocean_genai_sdk/_utils/_sync.py
deleted file mode 100644
index ad7ec71b..00000000
--- a/src/digitalocean_genai_sdk/_utils/_sync.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from __future__ import annotations
-
-import sys
-import asyncio
-import functools
-import contextvars
-from typing import Any, TypeVar, Callable, Awaitable
-from typing_extensions import ParamSpec
-
-import anyio
-import sniffio
-import anyio.to_thread
-
-T_Retval = TypeVar("T_Retval")
-T_ParamSpec = ParamSpec("T_ParamSpec")
-
-
-if sys.version_info >= (3, 9):
- _asyncio_to_thread = asyncio.to_thread
-else:
- # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
- # for Python 3.8 support
- async def _asyncio_to_thread(
- func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
- ) -> Any:
- """Asynchronously run function *func* in a separate thread.
-
- Any *args and **kwargs supplied for this function are directly passed
- to *func*. Also, the current :class:`contextvars.Context` is propagated,
- allowing context variables from the main thread to be accessed in the
- separate thread.
-
- Returns a coroutine that can be awaited to get the eventual result of *func*.
- """
- loop = asyncio.events.get_running_loop()
- ctx = contextvars.copy_context()
- func_call = functools.partial(ctx.run, func, *args, **kwargs)
- return await loop.run_in_executor(None, func_call)
-
-
-async def to_thread(
- func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
-) -> T_Retval:
- if sniffio.current_async_library() == "asyncio":
- return await _asyncio_to_thread(func, *args, **kwargs)
-
- return await anyio.to_thread.run_sync(
- functools.partial(func, *args, **kwargs),
- )
-
-
-# inspired by `asyncer`, https://github.com/tiangolo/asyncer
-def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
- """
- Take a blocking function and create an async one that receives the same
- positional and keyword arguments. For python version 3.9 and above, it uses
- asyncio.to_thread to run the function in a separate thread. For python version
- 3.8, it uses locally defined copy of the asyncio.to_thread function which was
- introduced in python 3.9.
-
- Usage:
-
- ```python
- def blocking_func(arg1, arg2, kwarg1=None):
- # blocking code
- return result
-
-
- result = asyncify(blocking_function)(arg1, arg2, kwarg1=value1)
- ```
-
- ## Arguments
-
- `function`: a blocking regular callable (e.g. a function)
-
- ## Return
-
- An async function that takes the same positional and keyword arguments as the
- original one, that when called runs the same original function in a thread worker
- and returns the result.
- """
-
- async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
- return await to_thread(function, *args, **kwargs)
-
- return wrapper
diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py
deleted file mode 100644
index 5c4fa53a..00000000
--- a/src/digitalocean_genai_sdk/_version.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-__title__ = "digitalocean_genai_sdk"
-__version__ = "0.0.1-alpha.0"
diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py
deleted file mode 100644
index 237b0ca7..00000000
--- a/src/digitalocean_genai_sdk/resources/__init__.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .chat import (
- ChatResource,
- AsyncChatResource,
- ChatResourceWithRawResponse,
- AsyncChatResourceWithRawResponse,
- ChatResourceWithStreamingResponse,
- AsyncChatResourceWithStreamingResponse,
-)
-from .audio import (
- AudioResource,
- AsyncAudioResource,
- AudioResourceWithRawResponse,
- AsyncAudioResourceWithRawResponse,
- AudioResourceWithStreamingResponse,
- AsyncAudioResourceWithStreamingResponse,
-)
-from .files import (
- FilesResource,
- AsyncFilesResource,
- FilesResourceWithRawResponse,
- AsyncFilesResourceWithRawResponse,
- FilesResourceWithStreamingResponse,
- AsyncFilesResourceWithStreamingResponse,
-)
-from .images import (
- ImagesResource,
- AsyncImagesResource,
- ImagesResourceWithRawResponse,
- AsyncImagesResourceWithRawResponse,
- ImagesResourceWithStreamingResponse,
- AsyncImagesResourceWithStreamingResponse,
-)
-from .models import (
- ModelsResource,
- AsyncModelsResource,
- ModelsResourceWithRawResponse,
- AsyncModelsResourceWithRawResponse,
- ModelsResourceWithStreamingResponse,
- AsyncModelsResourceWithStreamingResponse,
-)
-from .batches import (
- BatchesResource,
- AsyncBatchesResource,
- BatchesResourceWithRawResponse,
- AsyncBatchesResourceWithRawResponse,
- BatchesResourceWithStreamingResponse,
- AsyncBatchesResourceWithStreamingResponse,
-)
-from .threads import (
- ThreadsResource,
- AsyncThreadsResource,
- ThreadsResourceWithRawResponse,
- AsyncThreadsResourceWithRawResponse,
- ThreadsResourceWithStreamingResponse,
- AsyncThreadsResourceWithStreamingResponse,
-)
-from .uploads import (
- UploadsResource,
- AsyncUploadsResource,
- UploadsResourceWithRawResponse,
- AsyncUploadsResourceWithRawResponse,
- UploadsResourceWithStreamingResponse,
- AsyncUploadsResourceWithStreamingResponse,
-)
-from .realtime import (
- RealtimeResource,
- AsyncRealtimeResource,
- RealtimeResourceWithRawResponse,
- AsyncRealtimeResourceWithRawResponse,
- RealtimeResourceWithStreamingResponse,
- AsyncRealtimeResourceWithStreamingResponse,
-)
-from .responses import (
- ResponsesResource,
- AsyncResponsesResource,
- ResponsesResourceWithRawResponse,
- AsyncResponsesResourceWithRawResponse,
- ResponsesResourceWithStreamingResponse,
- AsyncResponsesResourceWithStreamingResponse,
-)
-from .assistants import (
- AssistantsResource,
- AsyncAssistantsResource,
- AssistantsResourceWithRawResponse,
- AsyncAssistantsResourceWithRawResponse,
- AssistantsResourceWithStreamingResponse,
- AsyncAssistantsResourceWithStreamingResponse,
-)
-from .embeddings import (
- EmbeddingsResource,
- AsyncEmbeddingsResource,
- EmbeddingsResourceWithRawResponse,
- AsyncEmbeddingsResourceWithRawResponse,
- EmbeddingsResourceWithStreamingResponse,
- AsyncEmbeddingsResourceWithStreamingResponse,
-)
-from .completions import (
- CompletionsResource,
- AsyncCompletionsResource,
- CompletionsResourceWithRawResponse,
- AsyncCompletionsResourceWithRawResponse,
- CompletionsResourceWithStreamingResponse,
- AsyncCompletionsResourceWithStreamingResponse,
-)
-from .fine_tuning import (
- FineTuningResource,
- AsyncFineTuningResource,
- FineTuningResourceWithRawResponse,
- AsyncFineTuningResourceWithRawResponse,
- FineTuningResourceWithStreamingResponse,
- AsyncFineTuningResourceWithStreamingResponse,
-)
-from .moderations import (
- ModerationsResource,
- AsyncModerationsResource,
- ModerationsResourceWithRawResponse,
- AsyncModerationsResourceWithRawResponse,
- ModerationsResourceWithStreamingResponse,
- AsyncModerationsResourceWithStreamingResponse,
-)
-from .organization import (
- OrganizationResource,
- AsyncOrganizationResource,
- OrganizationResourceWithRawResponse,
- AsyncOrganizationResourceWithRawResponse,
- OrganizationResourceWithStreamingResponse,
- AsyncOrganizationResourceWithStreamingResponse,
-)
-from .vector_stores import (
- VectorStoresResource,
- AsyncVectorStoresResource,
- VectorStoresResourceWithRawResponse,
- AsyncVectorStoresResourceWithRawResponse,
- VectorStoresResourceWithStreamingResponse,
- AsyncVectorStoresResourceWithStreamingResponse,
-)
-
-__all__ = [
- "AssistantsResource",
- "AsyncAssistantsResource",
- "AssistantsResourceWithRawResponse",
- "AsyncAssistantsResourceWithRawResponse",
- "AssistantsResourceWithStreamingResponse",
- "AsyncAssistantsResourceWithStreamingResponse",
- "AudioResource",
- "AsyncAudioResource",
- "AudioResourceWithRawResponse",
- "AsyncAudioResourceWithRawResponse",
- "AudioResourceWithStreamingResponse",
- "AsyncAudioResourceWithStreamingResponse",
- "BatchesResource",
- "AsyncBatchesResource",
- "BatchesResourceWithRawResponse",
- "AsyncBatchesResourceWithRawResponse",
- "BatchesResourceWithStreamingResponse",
- "AsyncBatchesResourceWithStreamingResponse",
- "ChatResource",
- "AsyncChatResource",
- "ChatResourceWithRawResponse",
- "AsyncChatResourceWithRawResponse",
- "ChatResourceWithStreamingResponse",
- "AsyncChatResourceWithStreamingResponse",
- "CompletionsResource",
- "AsyncCompletionsResource",
- "CompletionsResourceWithRawResponse",
- "AsyncCompletionsResourceWithRawResponse",
- "CompletionsResourceWithStreamingResponse",
- "AsyncCompletionsResourceWithStreamingResponse",
- "EmbeddingsResource",
- "AsyncEmbeddingsResource",
- "EmbeddingsResourceWithRawResponse",
- "AsyncEmbeddingsResourceWithRawResponse",
- "EmbeddingsResourceWithStreamingResponse",
- "AsyncEmbeddingsResourceWithStreamingResponse",
- "FilesResource",
- "AsyncFilesResource",
- "FilesResourceWithRawResponse",
- "AsyncFilesResourceWithRawResponse",
- "FilesResourceWithStreamingResponse",
- "AsyncFilesResourceWithStreamingResponse",
- "FineTuningResource",
- "AsyncFineTuningResource",
- "FineTuningResourceWithRawResponse",
- "AsyncFineTuningResourceWithRawResponse",
- "FineTuningResourceWithStreamingResponse",
- "AsyncFineTuningResourceWithStreamingResponse",
- "ImagesResource",
- "AsyncImagesResource",
- "ImagesResourceWithRawResponse",
- "AsyncImagesResourceWithRawResponse",
- "ImagesResourceWithStreamingResponse",
- "AsyncImagesResourceWithStreamingResponse",
- "ModelsResource",
- "AsyncModelsResource",
- "ModelsResourceWithRawResponse",
- "AsyncModelsResourceWithRawResponse",
- "ModelsResourceWithStreamingResponse",
- "AsyncModelsResourceWithStreamingResponse",
- "ModerationsResource",
- "AsyncModerationsResource",
- "ModerationsResourceWithRawResponse",
- "AsyncModerationsResourceWithRawResponse",
- "ModerationsResourceWithStreamingResponse",
- "AsyncModerationsResourceWithStreamingResponse",
- "OrganizationResource",
- "AsyncOrganizationResource",
- "OrganizationResourceWithRawResponse",
- "AsyncOrganizationResourceWithRawResponse",
- "OrganizationResourceWithStreamingResponse",
- "AsyncOrganizationResourceWithStreamingResponse",
- "RealtimeResource",
- "AsyncRealtimeResource",
- "RealtimeResourceWithRawResponse",
- "AsyncRealtimeResourceWithRawResponse",
- "RealtimeResourceWithStreamingResponse",
- "AsyncRealtimeResourceWithStreamingResponse",
- "ResponsesResource",
- "AsyncResponsesResource",
- "ResponsesResourceWithRawResponse",
- "AsyncResponsesResourceWithRawResponse",
- "ResponsesResourceWithStreamingResponse",
- "AsyncResponsesResourceWithStreamingResponse",
- "ThreadsResource",
- "AsyncThreadsResource",
- "ThreadsResourceWithRawResponse",
- "AsyncThreadsResourceWithRawResponse",
- "ThreadsResourceWithStreamingResponse",
- "AsyncThreadsResourceWithStreamingResponse",
- "UploadsResource",
- "AsyncUploadsResource",
- "UploadsResourceWithRawResponse",
- "AsyncUploadsResourceWithRawResponse",
- "UploadsResourceWithStreamingResponse",
- "AsyncUploadsResourceWithStreamingResponse",
- "VectorStoresResource",
- "AsyncVectorStoresResource",
- "VectorStoresResourceWithRawResponse",
- "AsyncVectorStoresResourceWithRawResponse",
- "VectorStoresResourceWithStreamingResponse",
- "AsyncVectorStoresResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/assistants.py b/src/digitalocean_genai_sdk/resources/assistants.py
deleted file mode 100644
index c6ae36f5..00000000
--- a/src/digitalocean_genai_sdk/resources/assistants.py
+++ /dev/null
@@ -1,910 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import (
- ReasoningEffort,
- assistant_list_params,
- assistant_create_params,
- assistant_update_params,
-)
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.assistant_object import AssistantObject
-from ..types.reasoning_effort import ReasoningEffort
-from ..types.assistant_list_response import AssistantListResponse
-from ..types.assistant_delete_response import AssistantDeleteResponse
-from ..types.assistant_supported_models import AssistantSupportedModels
-from ..types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["AssistantsResource", "AsyncAssistantsResource"]
-
-
-class AssistantsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AssistantsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AssistantsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AssistantsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AssistantsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- model: Union[str, AssistantSupportedModels],
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Create an assistant with a model and instructions.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- description: The description of the assistant. The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/assistants",
- body=maybe_transform(
- {
- "model": model,
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_create_params.AssistantCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- def retrieve(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Retrieves an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return self._get(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- def update(
- self,
- assistant_id: str,
- *,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """Modifies an assistant.
-
- Args:
- description: The description of the assistant.
-
- The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return self._post(
- f"/assistants/{assistant_id}",
- body=maybe_transform(
- {
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "model": model,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_update_params.AssistantUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantListResponse:
- """Returns a list of assistants.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/assistants",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- assistant_list_params.AssistantListParams,
- ),
- ),
- cast_to=AssistantListResponse,
- )
-
- def delete(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantDeleteResponse:
- """
- Delete an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return self._delete(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantDeleteResponse,
- )
-
-
-class AsyncAssistantsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAssistantsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAssistantsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAssistantsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAssistantsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- model: Union[str, AssistantSupportedModels],
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Create an assistant with a model and instructions.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- description: The description of the assistant. The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/assistants",
- body=await async_maybe_transform(
- {
- "model": model,
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_create_params.AssistantCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- async def retrieve(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """
- Retrieves an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return await self._get(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- async def update(
- self,
- assistant_id: str,
- *,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantObject:
- """Modifies an assistant.
-
- Args:
- description: The description of the assistant.
-
- The maximum length is 512 characters.
-
- instructions: The system instructions that the assistant uses. The maximum length is 256,000
- characters.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- name: The name of the assistant. The maximum length is 256 characters.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- assistant. Tools can be of types `code_interpreter`, `file_search`, or
- `function`.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return await self._post(
- f"/assistants/{assistant_id}",
- body=await async_maybe_transform(
- {
- "description": description,
- "instructions": instructions,
- "metadata": metadata,
- "model": model,
- "name": name,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "temperature": temperature,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- },
- assistant_update_params.AssistantUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantObject,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantListResponse:
- """Returns a list of assistants.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/assistants",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- assistant_list_params.AssistantListParams,
- ),
- ),
- cast_to=AssistantListResponse,
- )
-
- async def delete(
- self,
- assistant_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AssistantDeleteResponse:
- """
- Delete an assistant.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not assistant_id:
- raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
- return await self._delete(
- f"/assistants/{assistant_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AssistantDeleteResponse,
- )
-
-
-class AssistantsResourceWithRawResponse:
- def __init__(self, assistants: AssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = to_raw_response_wrapper(
- assistants.create,
- )
- self.retrieve = to_raw_response_wrapper(
- assistants.retrieve,
- )
- self.update = to_raw_response_wrapper(
- assistants.update,
- )
- self.list = to_raw_response_wrapper(
- assistants.list,
- )
- self.delete = to_raw_response_wrapper(
- assistants.delete,
- )
-
-
-class AsyncAssistantsResourceWithRawResponse:
- def __init__(self, assistants: AsyncAssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = async_to_raw_response_wrapper(
- assistants.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- assistants.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- assistants.update,
- )
- self.list = async_to_raw_response_wrapper(
- assistants.list,
- )
- self.delete = async_to_raw_response_wrapper(
- assistants.delete,
- )
-
-
-class AssistantsResourceWithStreamingResponse:
- def __init__(self, assistants: AssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = to_streamed_response_wrapper(
- assistants.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- assistants.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- assistants.update,
- )
- self.list = to_streamed_response_wrapper(
- assistants.list,
- )
- self.delete = to_streamed_response_wrapper(
- assistants.delete,
- )
-
-
-class AsyncAssistantsResourceWithStreamingResponse:
- def __init__(self, assistants: AsyncAssistantsResource) -> None:
- self._assistants = assistants
-
- self.create = async_to_streamed_response_wrapper(
- assistants.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- assistants.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- assistants.update,
- )
- self.list = async_to_streamed_response_wrapper(
- assistants.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- assistants.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/audio.py b/src/digitalocean_genai_sdk/resources/audio.py
deleted file mode 100644
index 7cecbe6d..00000000
--- a/src/digitalocean_genai_sdk/resources/audio.py
+++ /dev/null
@@ -1,650 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Any, List, Union, Mapping, Optional, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import (
- audio_generate_speech_params,
- audio_translate_audio_params,
- audio_transcribe_audio_params,
-)
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- BinaryAPIResponse,
- AsyncBinaryAPIResponse,
- StreamedBinaryAPIResponse,
- AsyncStreamedBinaryAPIResponse,
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- to_custom_raw_response_wrapper,
- async_to_streamed_response_wrapper,
- to_custom_streamed_response_wrapper,
- async_to_custom_raw_response_wrapper,
- async_to_custom_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.voice_ids_shared_param import VoiceIDsSharedParam
-from ..types.audio_translate_audio_response import AudioTranslateAudioResponse
-from ..types.audio_transcribe_audio_response import AudioTranscribeAudioResponse
-
-__all__ = ["AudioResource", "AsyncAudioResource"]
-
-
-class AudioResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AudioResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AudioResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AudioResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AudioResourceWithStreamingResponse(self)
-
- def generate_speech(
- self,
- *,
- input: str,
- model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]],
- voice: VoiceIDsSharedParam,
- instructions: str | NotGiven = NOT_GIVEN,
- response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
- speed: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BinaryAPIResponse:
- """
- Generates audio from the input text.
-
- Args:
- input: The text to generate audio for. The maximum length is 4096 characters.
-
- model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or
- `gpt-4o-mini-tts`.
-
- voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
- `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
- `verse`. Previews of the voices are available in the
- [Text to speech guide](/docs/guides/text-to-speech#voice-options).
-
- instructions: Control the voice of your generated audio with additional instructions. Does not
- work with `tts-1` or `tts-1-hd`.
-
- response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
- `wav`, and `pcm`.
-
- speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
- the default.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
- return self._post(
- "/audio/speech",
- body=maybe_transform(
- {
- "input": input,
- "model": model,
- "voice": voice,
- "instructions": instructions,
- "response_format": response_format,
- "speed": speed,
- },
- audio_generate_speech_params.AudioGenerateSpeechParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=BinaryAPIResponse,
- )
-
- def transcribe_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]],
- include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN,
- language: str | NotGiven = NOT_GIVEN,
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranscribeAudioResponse:
- """
- Transcribes audio into the input language.
-
- Args:
- file:
- The audio file object (not file name) to transcribe, in one of these formats:
- flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. The options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
- Whisper V2 model).
-
- include: Additional information to include in the transcription response. `logprobs` will
- return the log probabilities of the tokens in the response to understand the
- model's confidence in the transcription. `logprobs` only works with
- response_format set to `json` and only with the models `gpt-4o-transcribe` and
- `gpt-4o-mini-transcribe`.
-
- language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the
- audio language.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
- the only supported format is `json`.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the
- [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
- for more information.
-
- Note: Streaming is not supported for the `whisper-1` model and will be ignored.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- timestamp_granularities: The timestamp granularities to populate for this transcription.
- `response_format` must be set `verbose_json` to use timestamp granularities.
- Either or both of these options are supported: `word`, or `segment`. Note: There
- is no additional latency for segment timestamps, but generating word timestamps
- incurs additional latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "include": include,
- "language": language,
- "prompt": prompt,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "timestamp_granularities": timestamp_granularities,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranscribeAudioResponse,
- self._post(
- "/audio/transcriptions",
- body=maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranscribeAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
- def translate_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1"]],
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranslateAudioResponse:
- """
- Translates audio into English.
-
- Args:
- file: The audio file object (not file name) translate, in one of these formats: flac,
- mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. Only `whisper-1` (which is powered by our open source
- Whisper V2 model) is currently available.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in
- English.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "prompt": prompt,
- "response_format": response_format,
- "temperature": temperature,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranslateAudioResponse,
- self._post(
- "/audio/translations",
- body=maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranslateAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
-
-class AsyncAudioResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAudioResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAudioResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAudioResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAudioResourceWithStreamingResponse(self)
-
- async def generate_speech(
- self,
- *,
- input: str,
- model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]],
- voice: VoiceIDsSharedParam,
- instructions: str | NotGiven = NOT_GIVEN,
- response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
- speed: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncBinaryAPIResponse:
- """
- Generates audio from the input text.
-
- Args:
- input: The text to generate audio for. The maximum length is 4096 characters.
-
- model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or
- `gpt-4o-mini-tts`.
-
- voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
- `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
- `verse`. Previews of the voices are available in the
- [Text to speech guide](/docs/guides/text-to-speech#voice-options).
-
- instructions: Control the voice of your generated audio with additional instructions. Does not
- work with `tts-1` or `tts-1-hd`.
-
- response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
- `wav`, and `pcm`.
-
- speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
- the default.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
- return await self._post(
- "/audio/speech",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- "voice": voice,
- "instructions": instructions,
- "response_format": response_format,
- "speed": speed,
- },
- audio_generate_speech_params.AudioGenerateSpeechParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AsyncBinaryAPIResponse,
- )
-
- async def transcribe_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]],
- include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN,
- language: str | NotGiven = NOT_GIVEN,
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranscribeAudioResponse:
- """
- Transcribes audio into the input language.
-
- Args:
- file:
- The audio file object (not file name) to transcribe, in one of these formats:
- flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. The options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
- Whisper V2 model).
-
- include: Additional information to include in the transcription response. `logprobs` will
- return the log probabilities of the tokens in the response to understand the
- model's confidence in the transcription. `logprobs` only works with
- response_format set to `json` and only with the models `gpt-4o-transcribe` and
- `gpt-4o-mini-transcribe`.
-
- language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the
- audio language.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
- the only supported format is `json`.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the
- [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
- for more information.
-
- Note: Streaming is not supported for the `whisper-1` model and will be ignored.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- timestamp_granularities: The timestamp granularities to populate for this transcription.
- `response_format` must be set `verbose_json` to use timestamp granularities.
- Either or both of these options are supported: `word`, or `segment`. Note: There
- is no additional latency for segment timestamps, but generating word timestamps
- incurs additional latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "include": include,
- "language": language,
- "prompt": prompt,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "timestamp_granularities": timestamp_granularities,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranscribeAudioResponse,
- await self._post(
- "/audio/transcriptions",
- body=await async_maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranscribeAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
- async def translate_audio(
- self,
- *,
- file: FileTypes,
- model: Union[str, Literal["whisper-1"]],
- prompt: str | NotGiven = NOT_GIVEN,
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AudioTranslateAudioResponse:
- """
- Translates audio into English.
-
- Args:
- file: The audio file object (not file name) translate, in one of these formats: flac,
- mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-
- model: ID of the model to use. Only `whisper-1` (which is powered by our open source
- Whisper V2 model) is currently available.
-
- prompt: An optional text to guide the model's style or continue a previous audio
- segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in
- English.
-
- response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`.
-
- temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
- output more random, while lower values like 0.2 will make it more focused and
- deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "model": model,
- "prompt": prompt,
- "response_format": response_format,
- "temperature": temperature,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return cast(
- AudioTranslateAudioResponse,
- await self._post(
- "/audio/translations",
- body=await async_maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=cast(
- Any, AudioTranslateAudioResponse
- ), # Union types cannot be passed in as arguments in the type system
- ),
- )
-
-
-class AudioResourceWithRawResponse:
- def __init__(self, audio: AudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = to_custom_raw_response_wrapper(
- audio.generate_speech,
- BinaryAPIResponse,
- )
- self.transcribe_audio = to_raw_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = to_raw_response_wrapper(
- audio.translate_audio,
- )
-
-
-class AsyncAudioResourceWithRawResponse:
- def __init__(self, audio: AsyncAudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = async_to_custom_raw_response_wrapper(
- audio.generate_speech,
- AsyncBinaryAPIResponse,
- )
- self.transcribe_audio = async_to_raw_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = async_to_raw_response_wrapper(
- audio.translate_audio,
- )
-
-
-class AudioResourceWithStreamingResponse:
- def __init__(self, audio: AudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = to_custom_streamed_response_wrapper(
- audio.generate_speech,
- StreamedBinaryAPIResponse,
- )
- self.transcribe_audio = to_streamed_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = to_streamed_response_wrapper(
- audio.translate_audio,
- )
-
-
-class AsyncAudioResourceWithStreamingResponse:
- def __init__(self, audio: AsyncAudioResource) -> None:
- self._audio = audio
-
- self.generate_speech = async_to_custom_streamed_response_wrapper(
- audio.generate_speech,
- AsyncStreamedBinaryAPIResponse,
- )
- self.transcribe_audio = async_to_streamed_response_wrapper(
- audio.transcribe_audio,
- )
- self.translate_audio = async_to_streamed_response_wrapper(
- audio.translate_audio,
- )
diff --git a/src/digitalocean_genai_sdk/resources/batches.py b/src/digitalocean_genai_sdk/resources/batches.py
deleted file mode 100644
index a2b1fedf..00000000
--- a/src/digitalocean_genai_sdk/resources/batches.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import batch_list_params, batch_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..types.batch import Batch
-from .._base_client import make_request_options
-from ..types.batch_list_response import BatchListResponse
-
-__all__ = ["BatchesResource", "AsyncBatchesResource"]
-
-
-class BatchesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> BatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return BatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> BatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return BatchesResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- completion_window: Literal["24h"],
- endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
- input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Creates and executes a batch from an uploaded file of requests
-
- Args:
- completion_window: The time frame within which the batch should be processed. Currently only `24h`
- is supported.
-
- endpoint: The endpoint to be used for all requests in the batch. Currently
- `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- are supported. Note that `/v1/embeddings` batches are also restricted to a
- maximum of 50,000 embedding inputs across all requests in the batch.
-
- input_file_id: The ID of an uploaded file that contains requests for the new batch.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your input file must be formatted as a
- [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with
- the purpose `batch`. The file can contain up to 50,000 requests, and can be up
- to 200 MB in size.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/batches",
- body=maybe_transform(
- {
- "completion_window": completion_window,
- "endpoint": endpoint,
- "input_file_id": input_file_id,
- "metadata": metadata,
- },
- batch_create_params.BatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- def retrieve(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Retrieves a batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._get(
- f"/batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BatchListResponse:
- """List your organization's batches.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/batches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- batch_list_params.BatchListParams,
- ),
- ),
- cast_to=BatchListResponse,
- )
-
- def cancel(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """Cancels an in-progress batch.
-
- The batch will be in status `cancelling` for up to
- 10 minutes, before changing to `cancelled`, where it will have partial results
- (if any) available in the output file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._post(
- f"/batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
-
-class AsyncBatchesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncBatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncBatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncBatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncBatchesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- completion_window: Literal["24h"],
- endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
- input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Creates and executes a batch from an uploaded file of requests
-
- Args:
- completion_window: The time frame within which the batch should be processed. Currently only `24h`
- is supported.
-
- endpoint: The endpoint to be used for all requests in the batch. Currently
- `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- are supported. Note that `/v1/embeddings` batches are also restricted to a
- maximum of 50,000 embedding inputs across all requests in the batch.
-
- input_file_id: The ID of an uploaded file that contains requests for the new batch.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your input file must be formatted as a
- [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with
- the purpose `batch`. The file can contain up to 50,000 requests, and can be up
- to 200 MB in size.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/batches",
- body=await async_maybe_transform(
- {
- "completion_window": completion_window,
- "endpoint": endpoint,
- "input_file_id": input_file_id,
- "metadata": metadata,
- },
- batch_create_params.BatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- async def retrieve(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """
- Retrieves a batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._get(
- f"/batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BatchListResponse:
- """List your organization's batches.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/batches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- batch_list_params.BatchListParams,
- ),
- ),
- cast_to=BatchListResponse,
- )
-
- async def cancel(
- self,
- batch_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Batch:
- """Cancels an in-progress batch.
-
- The batch will be in status `cancelling` for up to
- 10 minutes, before changing to `cancelled`, where it will have partial results
- (if any) available in the output file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._post(
- f"/batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Batch,
- )
-
-
-class BatchesResourceWithRawResponse:
- def __init__(self, batches: BatchesResource) -> None:
- self._batches = batches
-
- self.create = to_raw_response_wrapper(
- batches.create,
- )
- self.retrieve = to_raw_response_wrapper(
- batches.retrieve,
- )
- self.list = to_raw_response_wrapper(
- batches.list,
- )
- self.cancel = to_raw_response_wrapper(
- batches.cancel,
- )
-
-
-class AsyncBatchesResourceWithRawResponse:
- def __init__(self, batches: AsyncBatchesResource) -> None:
- self._batches = batches
-
- self.create = async_to_raw_response_wrapper(
- batches.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- batches.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- batches.list,
- )
- self.cancel = async_to_raw_response_wrapper(
- batches.cancel,
- )
-
-
-class BatchesResourceWithStreamingResponse:
- def __init__(self, batches: BatchesResource) -> None:
- self._batches = batches
-
- self.create = to_streamed_response_wrapper(
- batches.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- batches.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- batches.list,
- )
- self.cancel = to_streamed_response_wrapper(
- batches.cancel,
- )
-
-
-class AsyncBatchesResourceWithStreamingResponse:
- def __init__(self, batches: AsyncBatchesResource) -> None:
- self._batches = batches
-
- self.create = async_to_streamed_response_wrapper(
- batches.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- batches.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- batches.list,
- )
- self.cancel = async_to_streamed_response_wrapper(
- batches.cancel,
- )
diff --git a/src/digitalocean_genai_sdk/resources/chat/chat.py b/src/digitalocean_genai_sdk/resources/chat/chat.py
deleted file mode 100644
index df1f356c..00000000
--- a/src/digitalocean_genai_sdk/resources/chat/chat.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .completions import (
- CompletionsResource,
- AsyncCompletionsResource,
- CompletionsResourceWithRawResponse,
- AsyncCompletionsResourceWithRawResponse,
- CompletionsResourceWithStreamingResponse,
- AsyncCompletionsResourceWithStreamingResponse,
-)
-
-__all__ = ["ChatResource", "AsyncChatResource"]
-
-
-class ChatResource(SyncAPIResource):
- @cached_property
- def completions(self) -> CompletionsResource:
- return CompletionsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ChatResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ChatResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ChatResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ChatResourceWithStreamingResponse(self)
-
-
-class AsyncChatResource(AsyncAPIResource):
- @cached_property
- def completions(self) -> AsyncCompletionsResource:
- return AsyncCompletionsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncChatResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncChatResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncChatResourceWithStreamingResponse(self)
-
-
-class ChatResourceWithRawResponse:
- def __init__(self, chat: ChatResource) -> None:
- self._chat = chat
-
- @cached_property
- def completions(self) -> CompletionsResourceWithRawResponse:
- return CompletionsResourceWithRawResponse(self._chat.completions)
-
-
-class AsyncChatResourceWithRawResponse:
- def __init__(self, chat: AsyncChatResource) -> None:
- self._chat = chat
-
- @cached_property
- def completions(self) -> AsyncCompletionsResourceWithRawResponse:
- return AsyncCompletionsResourceWithRawResponse(self._chat.completions)
-
-
-class ChatResourceWithStreamingResponse:
- def __init__(self, chat: ChatResource) -> None:
- self._chat = chat
-
- @cached_property
- def completions(self) -> CompletionsResourceWithStreamingResponse:
- return CompletionsResourceWithStreamingResponse(self._chat.completions)
-
-
-class AsyncChatResourceWithStreamingResponse:
- def __init__(self, chat: AsyncChatResource) -> None:
- self._chat = chat
-
- @cached_property
- def completions(self) -> AsyncCompletionsResourceWithStreamingResponse:
- return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions)
diff --git a/src/digitalocean_genai_sdk/resources/chat/completions.py b/src/digitalocean_genai_sdk/resources/chat/completions.py
deleted file mode 100644
index c0908a57..00000000
--- a/src/digitalocean_genai_sdk/resources/chat/completions.py
+++ /dev/null
@@ -1,1233 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ...types import ReasoningEffort
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...types.chat import (
- completion_list_params,
- completion_create_params,
- completion_update_params,
- completion_list_messages_params,
-)
-from ..._base_client import make_request_options
-from ...types.reasoning_effort import ReasoningEffort
-from ...types.chat.create_response import CreateResponse
-from ...types.stop_configuration_param import StopConfigurationParam
-from ...types.chat.model_ids_shared_param import ModelIDsSharedParam
-from ...types.chat.completion_list_response import CompletionListResponse
-from ...types.chat.completion_delete_response import CompletionDeleteResponse
-from ...types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
-from ...types.chat.completion_list_messages_response import CompletionListMessagesResponse
-
-__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
-
-
-class CompletionsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> CompletionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return CompletionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return CompletionsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- messages: Iterable[completion_create_params.Message],
- model: ModelIDsSharedParam,
- audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN,
- frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
- functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
- logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
- logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN,
- presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
- stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """
- **Starting a new project?** We recommend trying
- [Responses](/docs/api-reference/responses) to take advantage of the latest
- OpenAI platform features. Compare
- [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses).
-
- ---
-
- Creates a model response for the given chat conversation. Learn more in the
- [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- and [audio](/docs/guides/audio) guides.
-
- Parameter support can differ depending on the model used to generate the
- response, particularly for newer reasoning models. Parameters that are only
- supported for reasoning models are noted below. For the current state of
- unsupported parameters in reasoning models,
- [refer to the reasoning guide](/docs/guides/reasoning).
-
- Args:
- messages: A list of messages comprising the conversation so far. Depending on the
- [model](/docs/models) you use, different message types (modalities) are
- supported, like [text](/docs/guides/text-generation),
- [images](/docs/guides/vision), and [audio](/docs/guides/audio).
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
-
- audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
-
- frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- existing frequency in the text so far, decreasing the model's likelihood to
- repeat the same line verbatim.
-
- function_call: Deprecated in favor of `tool_choice`.
-
- Controls which (if any) function is called by the model.
-
- `none` means the model will not call a function and instead generates a message.
-
- `auto` means the model can pick between generating a message or calling a
- function.
-
- Specifying a particular function via `{"name": "my_function"}` forces the model
- to call that function.
-
- `none` is the default when no functions are present. `auto` is the default if
- functions are present.
-
- functions: Deprecated in favor of `tools`.
-
- A list of functions the model may generate JSON inputs for.
-
- logit_bias: Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the
- tokenizer) to an associated bias value from -100 to 100. Mathematically, the
- bias is added to the logits generated by the model prior to sampling. The exact
- effect will vary per model, but values between -1 and 1 should decrease or
- increase likelihood of selection; values like -100 or 100 should result in a ban
- or exclusive selection of the relevant token.
-
- logprobs: Whether to return log probabilities of the output tokens or not. If true,
- returns the log probabilities of each output token returned in the `content` of
- `message`.
-
- max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
-
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
- completion. This value can be used to control
- [costs](https://openai.com/api/pricing/) for text generated via API.
-
- This value is now deprecated in favor of `max_completion_tokens`, and is not
- compatible with [o1 series models](/docs/guides/reasoning).
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- modalities: Output types that you would like the model to generate. Most models are capable
- of generating text, which is the default:
-
- `["text"]`
-
- The `gpt-4o-audio-preview` model can also be used to
- [generate audio](/docs/guides/audio). To request that this model generate both
- text and audio responses, you can use:
-
- `["text", "audio"]`
-
- n: How many chat completion choices to generate for each input message. Note that
- you will be charged based on the number of generated tokens across all of the
- choices. Keep `n` as `1` to minimize costs.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- prediction: Static predicted output content, such as the content of a text file that is
- being regenerated.
-
- presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
- whether they appear in the text so far, increasing the model's likelihood to
- talk about new topics.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: An object specifying the format that the model must output.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
-
- seed: This feature is in Beta. If specified, our system will make a best effort to
- sample deterministically, such that repeated requests with the same `seed` and
- parameters should return the same result. Determinism is not guaranteed, and you
- should refer to the `system_fingerprint` response parameter to monitor changes
- in the backend.
-
- service_tier: Specifies the latency tier to use for processing the request. This parameter is
- relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
- - When not set, the default behavior is 'auto'.
-
- When this parameter is set, the response body will include the `service_tier`
- utilized.
-
- stop: Up to 4 sequences where the API will stop generating further tokens. The
- returned text will not contain the stop sequence.
-
- store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](/docs/guides/distillation) or
- [evals](/docs/guides/evals) products.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/chat/streaming) for more
- information, along with the
- [streaming responses](/docs/guides/streaming-responses) guide for more
- information on how to handle the streaming events.
-
- stream_options: Options for streaming response. Only set this when you set `stream: true`.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic. We generally recommend altering this or `top_p` but
- not both.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tool and instead generates a message. `auto` means the model can
- pick between generating a message or calling one or more tools. `required` means
- the model must call one or more tools. Specifying a particular tool via
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- `none` is the default when no tools are present. `auto` is the default if tools
- are present.
-
- tools: A list of tools the model may call. Currently, only functions are supported as a
- tool. Use this to provide a list of functions the model may generate JSON inputs
- for. A max of 128 functions are supported.
-
- top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
- return at each token position, each with an associated log probability.
- `logprobs` must be set to `true` if this parameter is used.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/chat/completions",
- body=maybe_transform(
- {
- "messages": messages,
- "model": model,
- "audio": audio,
- "frequency_penalty": frequency_penalty,
- "function_call": function_call,
- "functions": functions,
- "logit_bias": logit_bias,
- "logprobs": logprobs,
- "max_completion_tokens": max_completion_tokens,
- "max_tokens": max_tokens,
- "metadata": metadata,
- "modalities": modalities,
- "n": n,
- "parallel_tool_calls": parallel_tool_calls,
- "prediction": prediction,
- "presence_penalty": presence_penalty,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "seed": seed,
- "service_tier": service_tier,
- "stop": stop,
- "store": store,
- "stream": stream,
- "stream_options": stream_options,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_logprobs": top_logprobs,
- "top_p": top_p,
- "user": user,
- "web_search_options": web_search_options,
- },
- completion_create_params.CompletionCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- def retrieve(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Get a stored chat completion.
-
- Only Chat Completions that have been created with
- the `store` parameter set to `true` will be returned.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._get(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- def update(
- self,
- completion_id: str,
- *,
- metadata: Optional[Dict[str, str]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Modify a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be modified. Currently, the only
- supported modification is to update the `metadata` field.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._post(
- f"/chat/completions/{completion_id}",
- body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: str | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListResponse:
- """List stored Chat Completions.
-
- Only Chat Completions that have been stored with
- the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last chat completion from the previous pagination request.
-
- limit: Number of Chat Completions to retrieve.
-
- metadata:
- A list of metadata keys to filter the Chat Completions by. Example:
-
- `metadata[key1]=value1&metadata[key2]=value2`
-
- model: The model used to generate the Chat Completions.
-
- order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
- `desc` for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/chat/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- "model": model,
- "order": order,
- },
- completion_list_params.CompletionListParams,
- ),
- ),
- cast_to=CompletionListResponse,
- )
-
- def delete(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionDeleteResponse:
- """Delete a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._delete(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionDeleteResponse,
- )
-
- def list_messages(
- self,
- completion_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListMessagesResponse:
- """Get the messages in a stored chat completion.
-
- Only Chat Completions that have
- been created with the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last message from the previous pagination request.
-
- limit: Number of messages to retrieve.
-
- order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
- for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return self._get(
- f"/chat/completions/{completion_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- completion_list_messages_params.CompletionListMessagesParams,
- ),
- ),
- cast_to=CompletionListMessagesResponse,
- )
-
-
-class AsyncCompletionsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncCompletionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncCompletionsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- messages: Iterable[completion_create_params.Message],
- model: ModelIDsSharedParam,
- audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN,
- frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
- functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
- logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
- logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN,
- presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
- stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """
- **Starting a new project?** We recommend trying
- [Responses](/docs/api-reference/responses) to take advantage of the latest
- OpenAI platform features. Compare
- [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses).
-
- ---
-
- Creates a model response for the given chat conversation. Learn more in the
- [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- and [audio](/docs/guides/audio) guides.
-
- Parameter support can differ depending on the model used to generate the
- response, particularly for newer reasoning models. Parameters that are only
- supported for reasoning models are noted below. For the current state of
- unsupported parameters in reasoning models,
- [refer to the reasoning guide](/docs/guides/reasoning).
-
- Args:
- messages: A list of messages comprising the conversation so far. Depending on the
- [model](/docs/models) you use, different message types (modalities) are
- supported, like [text](/docs/guides/text-generation),
- [images](/docs/guides/vision), and [audio](/docs/guides/audio).
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
-
- audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
-
- frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- existing frequency in the text so far, decreasing the model's likelihood to
- repeat the same line verbatim.
-
- function_call: Deprecated in favor of `tool_choice`.
-
- Controls which (if any) function is called by the model.
-
- `none` means the model will not call a function and instead generates a message.
-
- `auto` means the model can pick between generating a message or calling a
- function.
-
- Specifying a particular function via `{"name": "my_function"}` forces the model
- to call that function.
-
- `none` is the default when no functions are present. `auto` is the default if
- functions are present.
-
- functions: Deprecated in favor of `tools`.
-
- A list of functions the model may generate JSON inputs for.
-
- logit_bias: Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the
- tokenizer) to an associated bias value from -100 to 100. Mathematically, the
- bias is added to the logits generated by the model prior to sampling. The exact
- effect will vary per model, but values between -1 and 1 should decrease or
- increase likelihood of selection; values like -100 or 100 should result in a ban
- or exclusive selection of the relevant token.
-
- logprobs: Whether to return log probabilities of the output tokens or not. If true,
- returns the log probabilities of each output token returned in the `content` of
- `message`.
-
- max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
-
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
- completion. This value can be used to control
- [costs](https://openai.com/api/pricing/) for text generated via API.
-
- This value is now deprecated in favor of `max_completion_tokens`, and is not
- compatible with [o1 series models](/docs/guides/reasoning).
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- modalities: Output types that you would like the model to generate. Most models are capable
- of generating text, which is the default:
-
- `["text"]`
-
- The `gpt-4o-audio-preview` model can also be used to
- [generate audio](/docs/guides/audio). To request that this model generate both
- text and audio responses, you can use:
-
- `["text", "audio"]`
-
- n: How many chat completion choices to generate for each input message. Note that
- you will be charged based on the number of generated tokens across all of the
- choices. Keep `n` as `1` to minimize costs.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- prediction: Static predicted output content, such as the content of a text file that is
- being regenerated.
-
- presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
- whether they appear in the text so far, increasing the model's likelihood to
- talk about new topics.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: An object specifying the format that the model must output.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
-
- seed: This feature is in Beta. If specified, our system will make a best effort to
- sample deterministically, such that repeated requests with the same `seed` and
- parameters should return the same result. Determinism is not guaranteed, and you
- should refer to the `system_fingerprint` response parameter to monitor changes
- in the backend.
-
- service_tier: Specifies the latency tier to use for processing the request. This parameter is
- relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
- - When not set, the default behavior is 'auto'.
-
- When this parameter is set, the response body will include the `service_tier`
- utilized.
-
- stop: Up to 4 sequences where the API will stop generating further tokens. The
- returned text will not contain the stop sequence.
-
- store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](/docs/guides/distillation) or
- [evals](/docs/guides/evals) products.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/chat/streaming) for more
- information, along with the
- [streaming responses](/docs/guides/streaming-responses) guide for more
- information on how to handle the streaming events.
-
- stream_options: Options for streaming response. Only set this when you set `stream: true`.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic. We generally recommend altering this or `top_p` but
- not both.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tool and instead generates a message. `auto` means the model can
- pick between generating a message or calling one or more tools. `required` means
- the model must call one or more tools. Specifying a particular tool via
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- `none` is the default when no tools are present. `auto` is the default if tools
- are present.
-
- tools: A list of tools the model may call. Currently, only functions are supported as a
- tool. Use this to provide a list of functions the model may generate JSON inputs
- for. A max of 128 functions are supported.
-
- top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
- return at each token position, each with an associated log probability.
- `logprobs` must be set to `true` if this parameter is used.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/chat/completions",
- body=await async_maybe_transform(
- {
- "messages": messages,
- "model": model,
- "audio": audio,
- "frequency_penalty": frequency_penalty,
- "function_call": function_call,
- "functions": functions,
- "logit_bias": logit_bias,
- "logprobs": logprobs,
- "max_completion_tokens": max_completion_tokens,
- "max_tokens": max_tokens,
- "metadata": metadata,
- "modalities": modalities,
- "n": n,
- "parallel_tool_calls": parallel_tool_calls,
- "prediction": prediction,
- "presence_penalty": presence_penalty,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "seed": seed,
- "service_tier": service_tier,
- "stop": stop,
- "store": store,
- "stream": stream,
- "stream_options": stream_options,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_logprobs": top_logprobs,
- "top_p": top_p,
- "user": user,
- "web_search_options": web_search_options,
- },
- completion_create_params.CompletionCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- async def retrieve(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Get a stored chat completion.
-
- Only Chat Completions that have been created with
- the `store` parameter set to `true` will be returned.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._get(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- async def update(
- self,
- completion_id: str,
- *,
- metadata: Optional[Dict[str, str]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CreateResponse:
- """Modify a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be modified. Currently, the only
- supported modification is to update the `metadata` field.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._post(
- f"/chat/completions/{completion_id}",
- body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CreateResponse,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: str | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListResponse:
- """List stored Chat Completions.
-
- Only Chat Completions that have been stored with
- the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last chat completion from the previous pagination request.
-
- limit: Number of Chat Completions to retrieve.
-
- metadata:
- A list of metadata keys to filter the Chat Completions by. Example:
-
- `metadata[key1]=value1&metadata[key2]=value2`
-
- model: The model used to generate the Chat Completions.
-
- order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
- `desc` for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/chat/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- "model": model,
- "order": order,
- },
- completion_list_params.CompletionListParams,
- ),
- ),
- cast_to=CompletionListResponse,
- )
-
- async def delete(
- self,
- completion_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionDeleteResponse:
- """Delete a stored chat completion.
-
- Only Chat Completions that have been created
- with the `store` parameter set to `true` can be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._delete(
- f"/chat/completions/{completion_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionDeleteResponse,
- )
-
- async def list_messages(
- self,
- completion_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionListMessagesResponse:
- """Get the messages in a stored chat completion.
-
- Only Chat Completions that have
- been created with the `store` parameter set to `true` will be returned.
-
- Args:
- after: Identifier for the last message from the previous pagination request.
-
- limit: Number of messages to retrieve.
-
- order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
- for descending order. Defaults to `asc`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not completion_id:
- raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
- return await self._get(
- f"/chat/completions/{completion_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- completion_list_messages_params.CompletionListMessagesParams,
- ),
- ),
- cast_to=CompletionListMessagesResponse,
- )
-
-
-class CompletionsResourceWithRawResponse:
- def __init__(self, completions: CompletionsResource) -> None:
- self._completions = completions
-
- self.create = to_raw_response_wrapper(
- completions.create,
- )
- self.retrieve = to_raw_response_wrapper(
- completions.retrieve,
- )
- self.update = to_raw_response_wrapper(
- completions.update,
- )
- self.list = to_raw_response_wrapper(
- completions.list,
- )
- self.delete = to_raw_response_wrapper(
- completions.delete,
- )
- self.list_messages = to_raw_response_wrapper(
- completions.list_messages,
- )
-
-
-class AsyncCompletionsResourceWithRawResponse:
- def __init__(self, completions: AsyncCompletionsResource) -> None:
- self._completions = completions
-
- self.create = async_to_raw_response_wrapper(
- completions.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- completions.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- completions.update,
- )
- self.list = async_to_raw_response_wrapper(
- completions.list,
- )
- self.delete = async_to_raw_response_wrapper(
- completions.delete,
- )
- self.list_messages = async_to_raw_response_wrapper(
- completions.list_messages,
- )
-
-
-class CompletionsResourceWithStreamingResponse:
- def __init__(self, completions: CompletionsResource) -> None:
- self._completions = completions
-
- self.create = to_streamed_response_wrapper(
- completions.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- completions.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- completions.update,
- )
- self.list = to_streamed_response_wrapper(
- completions.list,
- )
- self.delete = to_streamed_response_wrapper(
- completions.delete,
- )
- self.list_messages = to_streamed_response_wrapper(
- completions.list_messages,
- )
-
-
-class AsyncCompletionsResourceWithStreamingResponse:
- def __init__(self, completions: AsyncCompletionsResource) -> None:
- self._completions = completions
-
- self.create = async_to_streamed_response_wrapper(
- completions.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- completions.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- completions.update,
- )
- self.list = async_to_streamed_response_wrapper(
- completions.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- completions.delete,
- )
- self.list_messages = async_to_streamed_response_wrapper(
- completions.list_messages,
- )
diff --git a/src/digitalocean_genai_sdk/resources/completions.py b/src/digitalocean_genai_sdk/resources/completions.py
deleted file mode 100644
index ff495166..00000000
--- a/src/digitalocean_genai_sdk/resources/completions.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import completion_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.stop_configuration_param import StopConfigurationParam
-from ..types.completion_create_response import CompletionCreateResponse
-from ..types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
-
-__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
-
-
-class CompletionsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> CompletionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return CompletionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return CompletionsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
- prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
- best_of: Optional[int] | NotGiven = NOT_GIVEN,
- echo: Optional[bool] | NotGiven = NOT_GIVEN,
- frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
- logprobs: Optional[int] | NotGiven = NOT_GIVEN,
- max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse:
- """
- Creates a completion for the provided prompt and parameters.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- prompt: The prompt(s) to generate completions for, encoded as a string, array of
- strings, array of tokens, or array of token arrays.
-
- Note that <|endoftext|> is the document separator that the model sees during
- training, so if a prompt is not specified the model will generate as if from the
- beginning of a new document.
-
- best_of: Generates `best_of` completions server-side and returns the "best" (the one with
- the highest log probability per token). Results cannot be streamed.
-
- When used with `n`, `best_of` controls the number of candidate completions and
- `n` specifies how many to return – `best_of` must be greater than `n`.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- echo: Echo back the prompt in addition to the completion
-
- frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- existing frequency in the text so far, decreasing the model's likelihood to
- repeat the same line verbatim.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- logit_bias: Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- Mathematically, the bias is added to the logits generated by the model prior to
- sampling. The exact effect will vary per model, but values between -1 and 1
- should decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- from being generated.
-
- logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
- well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- list of the 5 most likely tokens. The API will always return the `logprob` of
- the sampled token, so there may be up to `logprobs+1` elements in the response.
-
- The maximum value for `logprobs` is 5.
-
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
- completion.
-
- The token count of your prompt plus `max_tokens` cannot exceed the model's
- context length.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens.
-
- n: How many completions to generate for each prompt.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
- whether they appear in the text so far, increasing the model's likelihood to
- talk about new topics.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- seed: If specified, our system will make a best effort to sample deterministically,
- such that repeated requests with the same `seed` and parameters should return
- the same result.
-
- Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- response parameter to monitor changes in the backend.
-
- stop: Up to 4 sequences where the API will stop generating further tokens. The
- returned text will not contain the stop sequence.
-
- stream: Whether to stream back partial progress. If set, tokens will be sent as
- data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]`
- message.
- [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-
- stream_options: Options for streaming response. Only set this when you set `stream: true`.
-
- suffix: The suffix that comes after a completion of inserted text.
-
- This parameter is only supported for `gpt-3.5-turbo-instruct`.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/completions",
- body=maybe_transform(
- {
- "model": model,
- "prompt": prompt,
- "best_of": best_of,
- "echo": echo,
- "frequency_penalty": frequency_penalty,
- "logit_bias": logit_bias,
- "logprobs": logprobs,
- "max_tokens": max_tokens,
- "n": n,
- "presence_penalty": presence_penalty,
- "seed": seed,
- "stop": stop,
- "stream": stream,
- "stream_options": stream_options,
- "suffix": suffix,
- "temperature": temperature,
- "top_p": top_p,
- "user": user,
- },
- completion_create_params.CompletionCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionCreateResponse,
- )
-
-
-class AsyncCompletionsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncCompletionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncCompletionsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
- prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
- best_of: Optional[int] | NotGiven = NOT_GIVEN,
- echo: Optional[bool] | NotGiven = NOT_GIVEN,
- frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
- logprobs: Optional[int] | NotGiven = NOT_GIVEN,
- max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse:
- """
- Creates a completion for the provided prompt and parameters.
-
- Args:
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- prompt: The prompt(s) to generate completions for, encoded as a string, array of
- strings, array of tokens, or array of token arrays.
-
- Note that <|endoftext|> is the document separator that the model sees during
- training, so if a prompt is not specified the model will generate as if from the
- beginning of a new document.
-
- best_of: Generates `best_of` completions server-side and returns the "best" (the one with
- the highest log probability per token). Results cannot be streamed.
-
- When used with `n`, `best_of` controls the number of candidate completions and
- `n` specifies how many to return – `best_of` must be greater than `n`.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- echo: Echo back the prompt in addition to the completion
-
- frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- existing frequency in the text so far, decreasing the model's likelihood to
- repeat the same line verbatim.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- logit_bias: Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- Mathematically, the bias is added to the logits generated by the model prior to
- sampling. The exact effect will vary per model, but values between -1 and 1
- should decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- from being generated.
-
- logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
- well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- list of the 5 most likely tokens. The API will always return the `logprob` of
- the sampled token, so there may be up to `logprobs+1` elements in the response.
-
- The maximum value for `logprobs` is 5.
-
- max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
- completion.
-
- The token count of your prompt plus `max_tokens` cannot exceed the model's
- context length.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens.
-
- n: How many completions to generate for each prompt.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
-
- presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
- whether they appear in the text so far, increasing the model's likelihood to
- talk about new topics.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
-
- seed: If specified, our system will make a best effort to sample deterministically,
- such that repeated requests with the same `seed` and parameters should return
- the same result.
-
- Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- response parameter to monitor changes in the backend.
-
- stop: Up to 4 sequences where the API will stop generating further tokens. The
- returned text will not contain the stop sequence.
-
- stream: Whether to stream back partial progress. If set, tokens will be sent as
- data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]`
- message.
- [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
-
- stream_options: Options for streaming response. Only set this when you set `stream: true`.
-
- suffix: The suffix that comes after a completion of inserted text.
-
- This parameter is only supported for `gpt-3.5-turbo-instruct`.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/completions",
- body=await async_maybe_transform(
- {
- "model": model,
- "prompt": prompt,
- "best_of": best_of,
- "echo": echo,
- "frequency_penalty": frequency_penalty,
- "logit_bias": logit_bias,
- "logprobs": logprobs,
- "max_tokens": max_tokens,
- "n": n,
- "presence_penalty": presence_penalty,
- "seed": seed,
- "stop": stop,
- "stream": stream,
- "stream_options": stream_options,
- "suffix": suffix,
- "temperature": temperature,
- "top_p": top_p,
- "user": user,
- },
- completion_create_params.CompletionCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=CompletionCreateResponse,
- )
-
-
-class CompletionsResourceWithRawResponse:
- def __init__(self, completions: CompletionsResource) -> None:
- self._completions = completions
-
- self.create = to_raw_response_wrapper(
- completions.create,
- )
-
-
-class AsyncCompletionsResourceWithRawResponse:
- def __init__(self, completions: AsyncCompletionsResource) -> None:
- self._completions = completions
-
- self.create = async_to_raw_response_wrapper(
- completions.create,
- )
-
-
-class CompletionsResourceWithStreamingResponse:
- def __init__(self, completions: CompletionsResource) -> None:
- self._completions = completions
-
- self.create = to_streamed_response_wrapper(
- completions.create,
- )
-
-
-class AsyncCompletionsResourceWithStreamingResponse:
- def __init__(self, completions: AsyncCompletionsResource) -> None:
- self._completions = completions
-
- self.create = async_to_streamed_response_wrapper(
- completions.create,
- )
diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/digitalocean_genai_sdk/resources/embeddings.py
deleted file mode 100644
index 92552f62..00000000
--- a/src/digitalocean_genai_sdk/resources/embeddings.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import embedding_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.embedding_create_response import EmbeddingCreateResponse
-
-__all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"]
-
-
-class EmbeddingsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> EmbeddingsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return EmbeddingsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return EmbeddingsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
- model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]],
- dimensions: int | NotGiven = NOT_GIVEN,
- encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> EmbeddingCreateResponse:
- """
- Creates an embedding vector representing the input text.
-
- Args:
- input: Input text to embed, encoded as a string or array of tokens. To embed multiple
- inputs in a single request, pass an array of strings or array of token arrays.
- The input must not exceed the max input tokens for the model (8192 tokens for
- `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
- dimensions or less.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens. Some models may also impose a limit on total number of
- tokens summed across inputs.
-
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- dimensions: The number of dimensions the resulting output embeddings should have. Only
- supported in `text-embedding-3` and later models.
-
- encoding_format: The format to return the embeddings in. Can be either `float` or
- [`base64`](https://pypi.org/project/pybase64/).
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/embeddings",
- body=maybe_transform(
- {
- "input": input,
- "model": model,
- "dimensions": dimensions,
- "encoding_format": encoding_format,
- "user": user,
- },
- embedding_create_params.EmbeddingCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EmbeddingCreateResponse,
- )
-
-
-class AsyncEmbeddingsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncEmbeddingsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncEmbeddingsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]],
- model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]],
- dimensions: int | NotGiven = NOT_GIVEN,
- encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> EmbeddingCreateResponse:
- """
- Creates an embedding vector representing the input text.
-
- Args:
- input: Input text to embed, encoded as a string or array of tokens. To embed multiple
- inputs in a single request, pass an array of strings or array of token arrays.
- The input must not exceed the max input tokens for the model (8192 tokens for
- `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
- dimensions or less.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens. Some models may also impose a limit on total number of
- tokens summed across inputs.
-
- model: ID of the model to use. You can use the
- [List models](/docs/api-reference/models/list) API to see all of your available
- models, or see our [Model overview](/docs/models) for descriptions of them.
-
- dimensions: The number of dimensions the resulting output embeddings should have. Only
- supported in `text-embedding-3` and later models.
-
- encoding_format: The format to return the embeddings in. Can be either `float` or
- [`base64`](https://pypi.org/project/pybase64/).
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/embeddings",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- "dimensions": dimensions,
- "encoding_format": encoding_format,
- "user": user,
- },
- embedding_create_params.EmbeddingCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EmbeddingCreateResponse,
- )
-
-
-class EmbeddingsResourceWithRawResponse:
- def __init__(self, embeddings: EmbeddingsResource) -> None:
- self._embeddings = embeddings
-
- self.create = to_raw_response_wrapper(
- embeddings.create,
- )
-
-
-class AsyncEmbeddingsResourceWithRawResponse:
- def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
- self._embeddings = embeddings
-
- self.create = async_to_raw_response_wrapper(
- embeddings.create,
- )
-
-
-class EmbeddingsResourceWithStreamingResponse:
- def __init__(self, embeddings: EmbeddingsResource) -> None:
- self._embeddings = embeddings
-
- self.create = to_streamed_response_wrapper(
- embeddings.create,
- )
-
-
-class AsyncEmbeddingsResourceWithStreamingResponse:
- def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
- self._embeddings = embeddings
-
- self.create = async_to_streamed_response_wrapper(
- embeddings.create,
- )
diff --git a/src/digitalocean_genai_sdk/resources/files.py b/src/digitalocean_genai_sdk/resources/files.py
deleted file mode 100644
index 65e459f4..00000000
--- a/src/digitalocean_genai_sdk/resources/files.py
+++ /dev/null
@@ -1,608 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Mapping, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import file_list_params, file_upload_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.openai_file import OpenAIFile
-from ..types.file_list_response import FileListResponse
-from ..types.file_delete_response import FileDeleteResponse
-
-__all__ = ["FilesResource", "AsyncFilesResource"]
-
-
-class FilesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FilesResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """
- Returns information about a specific file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- purpose: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileListResponse:
- """Returns a list of files.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 10,000, and the default is 10,000.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- purpose: Only return files with the given purpose.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "purpose": purpose,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=FileListResponse,
- )
-
- def delete(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """
- Delete a file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._delete(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- def retrieve_content(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> str:
- """
- Returns the contents of the specified file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=str,
- )
-
- def upload(
- self,
- *,
- file: FileTypes,
- purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """Upload a file that can be used across various endpoints.
-
- Individual files can be
- up to 512 MB, and the size of all files uploaded by one organization can be up
- to 100 GB.
-
- The Assistants API supports files up to 2 million tokens and of specific file
- types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
-
- The Fine-tuning API only supports `.jsonl` files. The input also has certain
- required formats for fine-tuning
- [chat](/docs/api-reference/fine-tuning/chat-input) or
- [completions](/docs/api-reference/fine-tuning/completions-input) models.
-
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
- has a specific required [format](/docs/api-reference/batch/request-input).
-
- Please [contact us](https://help.openai.com/) if you need to increase these
- storage limits.
-
- Args:
- file: The File object (not file name) to be uploaded.
-
- purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
- Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
- fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
- Flexible file type for any purpose - `evals`: Used for eval data sets
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "purpose": purpose,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/files",
- body=maybe_transform(body, file_upload_params.FileUploadParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
-
-class AsyncFilesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFilesResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """
- Returns information about a specific file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- purpose: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileListResponse:
- """Returns a list of files.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 10,000, and the default is 10,000.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- purpose: Only return files with the given purpose.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "purpose": purpose,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=FileListResponse,
- )
-
- async def delete(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """
- Delete a file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._delete(
- f"/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- async def retrieve_content(
- self,
- file_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> str:
- """
- Returns the contents of the specified file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=str,
- )
-
- async def upload(
- self,
- *,
- file: FileTypes,
- purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIFile:
- """Upload a file that can be used across various endpoints.
-
- Individual files can be
- up to 512 MB, and the size of all files uploaded by one organization can be up
- to 100 GB.
-
- The Assistants API supports files up to 2 million tokens and of specific file
- types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
-
- The Fine-tuning API only supports `.jsonl` files. The input also has certain
- required formats for fine-tuning
- [chat](/docs/api-reference/fine-tuning/chat-input) or
- [completions](/docs/api-reference/fine-tuning/completions-input) models.
-
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
- has a specific required [format](/docs/api-reference/batch/request-input).
-
- Please [contact us](https://help.openai.com/) if you need to increase these
- storage limits.
-
- Args:
- file: The File object (not file name) to be uploaded.
-
- purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
- Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
- fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
- Flexible file type for any purpose - `evals`: Used for eval data sets
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "purpose": purpose,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/files",
- body=await async_maybe_transform(body, file_upload_params.FileUploadParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIFile,
- )
-
-
-class FilesResourceWithRawResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.retrieve = to_raw_response_wrapper(
- files.retrieve,
- )
- self.list = to_raw_response_wrapper(
- files.list,
- )
- self.delete = to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_raw_response_wrapper(
- files.retrieve_content,
- )
- self.upload = to_raw_response_wrapper(
- files.upload,
- )
-
-
-class AsyncFilesResourceWithRawResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.retrieve = async_to_raw_response_wrapper(
- files.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- files.list,
- )
- self.delete = async_to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_raw_response_wrapper(
- files.retrieve_content,
- )
- self.upload = async_to_raw_response_wrapper(
- files.upload,
- )
-
-
-class FilesResourceWithStreamingResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.retrieve = to_streamed_response_wrapper(
- files.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- files.list,
- )
- self.delete = to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_streamed_response_wrapper(
- files.retrieve_content,
- )
- self.upload = to_streamed_response_wrapper(
- files.upload,
- )
-
-
-class AsyncFilesResourceWithStreamingResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.retrieve = async_to_streamed_response_wrapper(
- files.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- files.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_streamed_response_wrapper(
- files.retrieve_content,
- )
- self.upload = async_to_streamed_response_wrapper(
- files.upload,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py
deleted file mode 100644
index 5f198d2e..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-from .fine_tuning import (
- FineTuningResource,
- AsyncFineTuningResource,
- FineTuningResourceWithRawResponse,
- AsyncFineTuningResourceWithRawResponse,
- FineTuningResourceWithStreamingResponse,
- AsyncFineTuningResourceWithStreamingResponse,
-)
-
-__all__ = [
- "CheckpointsResource",
- "AsyncCheckpointsResource",
- "CheckpointsResourceWithRawResponse",
- "AsyncCheckpointsResourceWithRawResponse",
- "CheckpointsResourceWithStreamingResponse",
- "AsyncCheckpointsResourceWithStreamingResponse",
- "JobsResource",
- "AsyncJobsResource",
- "JobsResourceWithRawResponse",
- "AsyncJobsResourceWithRawResponse",
- "JobsResourceWithStreamingResponse",
- "AsyncJobsResourceWithStreamingResponse",
- "FineTuningResource",
- "AsyncFineTuningResource",
- "FineTuningResourceWithRawResponse",
- "AsyncFineTuningResourceWithRawResponse",
- "FineTuningResourceWithStreamingResponse",
- "AsyncFineTuningResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py
deleted file mode 100644
index 3f6710f0..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-from .permissions import (
- PermissionsResource,
- AsyncPermissionsResource,
- PermissionsResourceWithRawResponse,
- AsyncPermissionsResourceWithRawResponse,
- PermissionsResourceWithStreamingResponse,
- AsyncPermissionsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "PermissionsResource",
- "AsyncPermissionsResource",
- "PermissionsResourceWithRawResponse",
- "AsyncPermissionsResourceWithRawResponse",
- "PermissionsResourceWithStreamingResponse",
- "AsyncPermissionsResourceWithStreamingResponse",
- "CheckpointsResource",
- "AsyncCheckpointsResource",
- "CheckpointsResourceWithRawResponse",
- "AsyncCheckpointsResourceWithRawResponse",
- "CheckpointsResourceWithStreamingResponse",
- "AsyncCheckpointsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py
deleted file mode 100644
index b1a85058..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ...._compat import cached_property
-from .permissions import (
- PermissionsResource,
- AsyncPermissionsResource,
- PermissionsResourceWithRawResponse,
- AsyncPermissionsResourceWithRawResponse,
- PermissionsResourceWithStreamingResponse,
- AsyncPermissionsResourceWithStreamingResponse,
-)
-from ...._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"]
-
-
-class CheckpointsResource(SyncAPIResource):
- @cached_property
- def permissions(self) -> PermissionsResource:
- return PermissionsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> CheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return CheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return CheckpointsResourceWithStreamingResponse(self)
-
-
-class AsyncCheckpointsResource(AsyncAPIResource):
- @cached_property
- def permissions(self) -> AsyncPermissionsResource:
- return AsyncPermissionsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncCheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncCheckpointsResourceWithStreamingResponse(self)
-
-
-class CheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> PermissionsResourceWithRawResponse:
- return PermissionsResourceWithRawResponse(self._checkpoints.permissions)
-
-
-class AsyncCheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> AsyncPermissionsResourceWithRawResponse:
- return AsyncPermissionsResourceWithRawResponse(self._checkpoints.permissions)
-
-
-class CheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> PermissionsResourceWithStreamingResponse:
- return PermissionsResourceWithStreamingResponse(self._checkpoints.permissions)
-
-
-class AsyncCheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- @cached_property
- def permissions(self) -> AsyncPermissionsResourceWithStreamingResponse:
- return AsyncPermissionsResourceWithStreamingResponse(self._checkpoints.permissions)
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py
deleted file mode 100644
index 0dee4435..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py
+++ /dev/null
@@ -1,401 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params
-from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse
-from ....types.fine_tuning.checkpoints.list_fine_tuning_checkpoint_permission import ListFineTuningCheckpointPermission
-
-__all__ = ["PermissionsResource", "AsyncPermissionsResource"]
-
-
-class PermissionsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> PermissionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return PermissionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> PermissionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return PermissionsResourceWithStreamingResponse(self)
-
- def create(
- self,
- permission_id: str,
- *,
- project_ids: List[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
-
- This enables organization owners to share fine-tuned models with other projects
- in their organization.
-
- Args:
- project_ids: The project identifiers to grant access to.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return self._post(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- def retrieve(
- self,
- permission_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN,
- project_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to view all permissions for a
- fine-tuned model checkpoint.
-
- Args:
- after: Identifier for the last permission ID from the previous pagination request.
-
- limit: Number of permissions to retrieve.
-
- order: The order in which to retrieve permissions.
-
- project_id: The ID of the project to get permissions for.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return self._get(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "project_id": project_id,
- },
- permission_retrieve_params.PermissionRetrieveParams,
- ),
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- def delete(
- self,
- permission_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> PermissionDeleteResponse:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to delete a permission for a
- fine-tuned model checkpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return self._delete(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PermissionDeleteResponse,
- )
-
-
-class AsyncPermissionsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncPermissionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncPermissionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncPermissionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncPermissionsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- permission_id: str,
- *,
- project_ids: List[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
-
- This enables organization owners to share fine-tuned models with other projects
- in their organization.
-
- Args:
- project_ids: The project identifiers to grant access to.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return await self._post(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- body=await async_maybe_transform(
- {"project_ids": project_ids}, permission_create_params.PermissionCreateParams
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- async def retrieve(
- self,
- permission_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN,
- project_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListFineTuningCheckpointPermission:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to view all permissions for a
- fine-tuned model checkpoint.
-
- Args:
- after: Identifier for the last permission ID from the previous pagination request.
-
- limit: Number of permissions to retrieve.
-
- order: The order in which to retrieve permissions.
-
- project_id: The ID of the project to get permissions for.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return await self._get(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- "project_id": project_id,
- },
- permission_retrieve_params.PermissionRetrieveParams,
- ),
- ),
- cast_to=ListFineTuningCheckpointPermission,
- )
-
- async def delete(
- self,
- permission_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> PermissionDeleteResponse:
- """
- **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
-
- Organization owners can use this endpoint to delete a permission for a
- fine-tuned model checkpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not permission_id:
- raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
- return await self._delete(
- f"/fine_tuning/checkpoints/{permission_id}/permissions",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PermissionDeleteResponse,
- )
-
-
-class PermissionsResourceWithRawResponse:
- def __init__(self, permissions: PermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = to_raw_response_wrapper(
- permissions.create,
- )
- self.retrieve = to_raw_response_wrapper(
- permissions.retrieve,
- )
- self.delete = to_raw_response_wrapper(
- permissions.delete,
- )
-
-
-class AsyncPermissionsResourceWithRawResponse:
- def __init__(self, permissions: AsyncPermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = async_to_raw_response_wrapper(
- permissions.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- permissions.retrieve,
- )
- self.delete = async_to_raw_response_wrapper(
- permissions.delete,
- )
-
-
-class PermissionsResourceWithStreamingResponse:
- def __init__(self, permissions: PermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = to_streamed_response_wrapper(
- permissions.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- permissions.retrieve,
- )
- self.delete = to_streamed_response_wrapper(
- permissions.delete,
- )
-
-
-class AsyncPermissionsResourceWithStreamingResponse:
- def __init__(self, permissions: AsyncPermissionsResource) -> None:
- self._permissions = permissions
-
- self.create = async_to_streamed_response_wrapper(
- permissions.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- permissions.retrieve,
- )
- self.delete = async_to_streamed_response_wrapper(
- permissions.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py b/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py
deleted file mode 100644
index 8b4956b1..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ..._compat import cached_property
-from .jobs.jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .checkpoints.checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-
-__all__ = ["FineTuningResource", "AsyncFineTuningResource"]
-
-
-class FineTuningResource(SyncAPIResource):
- @cached_property
- def checkpoints(self) -> CheckpointsResource:
- return CheckpointsResource(self._client)
-
- @cached_property
- def jobs(self) -> JobsResource:
- return JobsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> FineTuningResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FineTuningResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FineTuningResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FineTuningResourceWithStreamingResponse(self)
-
-
-class AsyncFineTuningResource(AsyncAPIResource):
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResource:
- return AsyncCheckpointsResource(self._client)
-
- @cached_property
- def jobs(self) -> AsyncJobsResource:
- return AsyncJobsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncFineTuningResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFineTuningResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFineTuningResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFineTuningResourceWithStreamingResponse(self)
-
-
-class FineTuningResourceWithRawResponse:
- def __init__(self, fine_tuning: FineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithRawResponse:
- return CheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> JobsResourceWithRawResponse:
- return JobsResourceWithRawResponse(self._fine_tuning.jobs)
-
-
-class AsyncFineTuningResourceWithRawResponse:
- def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse:
- return AsyncCheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> AsyncJobsResourceWithRawResponse:
- return AsyncJobsResourceWithRawResponse(self._fine_tuning.jobs)
-
-
-class FineTuningResourceWithStreamingResponse:
- def __init__(self, fine_tuning: FineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithStreamingResponse:
- return CheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> JobsResourceWithStreamingResponse:
- return JobsResourceWithStreamingResponse(self._fine_tuning.jobs)
-
-
-class AsyncFineTuningResourceWithStreamingResponse:
- def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
- self._fine_tuning = fine_tuning
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- return AsyncCheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints)
-
- @cached_property
- def jobs(self) -> AsyncJobsResourceWithStreamingResponse:
- return AsyncJobsResourceWithStreamingResponse(self._fine_tuning.jobs)
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py
deleted file mode 100644
index 90e643d7..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-from .events import (
- EventsResource,
- AsyncEventsResource,
- EventsResourceWithRawResponse,
- AsyncEventsResourceWithRawResponse,
- EventsResourceWithStreamingResponse,
- AsyncEventsResourceWithStreamingResponse,
-)
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "CheckpointsResource",
- "AsyncCheckpointsResource",
- "CheckpointsResourceWithRawResponse",
- "AsyncCheckpointsResourceWithRawResponse",
- "CheckpointsResourceWithStreamingResponse",
- "AsyncCheckpointsResourceWithStreamingResponse",
- "EventsResource",
- "AsyncEventsResource",
- "EventsResourceWithRawResponse",
- "AsyncEventsResourceWithRawResponse",
- "EventsResourceWithStreamingResponse",
- "AsyncEventsResourceWithStreamingResponse",
- "JobsResource",
- "AsyncJobsResource",
- "JobsResourceWithRawResponse",
- "AsyncJobsResourceWithRawResponse",
- "JobsResourceWithStreamingResponse",
- "AsyncJobsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py
deleted file mode 100644
index d9ade070..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning.jobs import checkpoint_retrieve_params
-from ....types.fine_tuning.jobs.checkpoint_retrieve_response import CheckpointRetrieveResponse
-
-__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"]
-
-
-class CheckpointsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> CheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return CheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return CheckpointsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CheckpointRetrieveResponse:
- """
- List checkpoints for a fine-tuning job.
-
- Args:
- after: Identifier for the last checkpoint ID from the previous pagination request.
-
- limit: Number of checkpoints to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- checkpoint_retrieve_params.CheckpointRetrieveParams,
- ),
- ),
- cast_to=CheckpointRetrieveResponse,
- )
-
-
-class AsyncCheckpointsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncCheckpointsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncCheckpointsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CheckpointRetrieveResponse:
- """
- List checkpoints for a fine-tuning job.
-
- Args:
- after: Identifier for the last checkpoint ID from the previous pagination request.
-
- limit: Number of checkpoints to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- checkpoint_retrieve_params.CheckpointRetrieveParams,
- ),
- ),
- cast_to=CheckpointRetrieveResponse,
- )
-
-
-class CheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = to_raw_response_wrapper(
- checkpoints.retrieve,
- )
-
-
-class AsyncCheckpointsResourceWithRawResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = async_to_raw_response_wrapper(
- checkpoints.retrieve,
- )
-
-
-class CheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: CheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = to_streamed_response_wrapper(
- checkpoints.retrieve,
- )
-
-
-class AsyncCheckpointsResourceWithStreamingResponse:
- def __init__(self, checkpoints: AsyncCheckpointsResource) -> None:
- self._checkpoints = checkpoints
-
- self.retrieve = async_to_streamed_response_wrapper(
- checkpoints.retrieve,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py
deleted file mode 100644
index 6005084f..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning.jobs import event_retrieve_params
-from ....types.fine_tuning.jobs.event_retrieve_response import EventRetrieveResponse
-
-__all__ = ["EventsResource", "AsyncEventsResource"]
-
-
-class EventsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> EventsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return EventsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> EventsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return EventsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> EventRetrieveResponse:
- """
- Get status updates for a fine-tuning job.
-
- Args:
- after: Identifier for the last event from the previous pagination request.
-
- limit: Number of events to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- event_retrieve_params.EventRetrieveParams,
- ),
- ),
- cast_to=EventRetrieveResponse,
- )
-
-
-class AsyncEventsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncEventsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncEventsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncEventsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncEventsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> EventRetrieveResponse:
- """
- Get status updates for a fine-tuning job.
-
- Args:
- after: Identifier for the last event from the previous pagination request.
-
- limit: Number of events to retrieve.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- event_retrieve_params.EventRetrieveParams,
- ),
- ),
- cast_to=EventRetrieveResponse,
- )
-
-
-class EventsResourceWithRawResponse:
- def __init__(self, events: EventsResource) -> None:
- self._events = events
-
- self.retrieve = to_raw_response_wrapper(
- events.retrieve,
- )
-
-
-class AsyncEventsResourceWithRawResponse:
- def __init__(self, events: AsyncEventsResource) -> None:
- self._events = events
-
- self.retrieve = async_to_raw_response_wrapper(
- events.retrieve,
- )
-
-
-class EventsResourceWithStreamingResponse:
- def __init__(self, events: EventsResource) -> None:
- self._events = events
-
- self.retrieve = to_streamed_response_wrapper(
- events.retrieve,
- )
-
-
-class AsyncEventsResourceWithStreamingResponse:
- def __init__(self, events: AsyncEventsResource) -> None:
- self._events = events
-
- self.retrieve = async_to_streamed_response_wrapper(
- events.retrieve,
- )
diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py
deleted file mode 100644
index 86a7ae4b..00000000
--- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py
+++ /dev/null
@@ -1,668 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from .events import (
- EventsResource,
- AsyncEventsResource,
- EventsResourceWithRawResponse,
- AsyncEventsResourceWithRawResponse,
- EventsResourceWithStreamingResponse,
- AsyncEventsResourceWithStreamingResponse,
-)
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from .checkpoints import (
- CheckpointsResource,
- AsyncCheckpointsResource,
- CheckpointsResourceWithRawResponse,
- AsyncCheckpointsResourceWithRawResponse,
- CheckpointsResourceWithStreamingResponse,
- AsyncCheckpointsResourceWithStreamingResponse,
-)
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.fine_tuning import job_list_params, job_create_params
-from ....types.fine_tuning.fine_tuning_job import FineTuningJob
-from ....types.fine_tuning.job_list_response import JobListResponse
-from ....types.fine_tuning.fine_tune_method_param import FineTuneMethodParam
-
-__all__ = ["JobsResource", "AsyncJobsResource"]
-
-
-class JobsResource(SyncAPIResource):
- @cached_property
- def checkpoints(self) -> CheckpointsResource:
- return CheckpointsResource(self._client)
-
- @cached_property
- def events(self) -> EventsResource:
- return EventsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> JobsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return JobsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> JobsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return JobsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
- training_file: str,
- hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
- integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- method: FineTuneMethodParam | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- validation_file: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Creates a fine-tuning job which begins the process of creating a new model from
- a given dataset.
-
- Response includes details of the enqueued job including job status and the name
- of the fine-tuned models once complete.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- model: The name of the model to fine-tune. You can select one of the
- [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-
- training_file: The ID of an uploaded file that contains training data.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your dataset must be formatted as a JSONL file. Additionally, you must upload
- your file with the purpose `fine-tune`.
-
- The contents of the file should differ depending on if the model uses the
- [chat](/docs/api-reference/fine-tuning/chat-input),
- [completions](/docs/api-reference/fine-tuning/completions-input) format, or if
- the fine-tuning method uses the
- [preference](/docs/api-reference/fine-tuning/preference-input) format.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
- in favor of `method`, and should be passed in under the `method` parameter.
-
- integrations: A list of integrations to enable for your fine-tuning job.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- method: The method used for fine-tuning.
-
- seed: The seed controls the reproducibility of the job. Passing in the same seed and
- job parameters should produce the same results, but may differ in rare cases. If
- a seed is not specified, one will be generated for you.
-
- suffix: A string of up to 64 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
-
- validation_file: The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the fine-tuning
- results file. The same data should not be present in both train and validation
- files.
-
- Your dataset must be formatted as a JSONL file. You must upload your file with
- the purpose `fine-tune`.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/fine_tuning/jobs",
- body=maybe_transform(
- {
- "model": model,
- "training_file": training_file,
- "hyperparameters": hyperparameters,
- "integrations": integrations,
- "metadata": metadata,
- "method": method,
- "seed": seed,
- "suffix": suffix,
- "validation_file": validation_file,
- },
- job_create_params.JobCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Get info about a fine-tuning job.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> JobListResponse:
- """
- List your organization's fine-tuning jobs
-
- Args:
- after: Identifier for the last job from the previous pagination request.
-
- limit: Number of fine-tuning jobs to retrieve.
-
- metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
- Alternatively, set `metadata=null` to indicate no metadata.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/fine_tuning/jobs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- },
- job_list_params.JobListParams,
- ),
- ),
- cast_to=JobListResponse,
- )
-
- def cancel(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Immediately cancel a fine-tune job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
-
-class AsyncJobsResource(AsyncAPIResource):
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResource:
- return AsyncCheckpointsResource(self._client)
-
- @cached_property
- def events(self) -> AsyncEventsResource:
- return AsyncEventsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncJobsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncJobsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncJobsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
- training_file: str,
- hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
- integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- method: FineTuneMethodParam | NotGiven = NOT_GIVEN,
- seed: Optional[int] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- validation_file: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Creates a fine-tuning job which begins the process of creating a new model from
- a given dataset.
-
- Response includes details of the enqueued job including job status and the name
- of the fine-tuned models once complete.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- model: The name of the model to fine-tune. You can select one of the
- [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-
- training_file: The ID of an uploaded file that contains training data.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your dataset must be formatted as a JSONL file. Additionally, you must upload
- your file with the purpose `fine-tune`.
-
- The contents of the file should differ depending on if the model uses the
- [chat](/docs/api-reference/fine-tuning/chat-input),
- [completions](/docs/api-reference/fine-tuning/completions-input) format, or if
- the fine-tuning method uses the
- [preference](/docs/api-reference/fine-tuning/preference-input) format.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
- in favor of `method`, and should be passed in under the `method` parameter.
-
- integrations: A list of integrations to enable for your fine-tuning job.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- method: The method used for fine-tuning.
-
- seed: The seed controls the reproducibility of the job. Passing in the same seed and
- job parameters should produce the same results, but may differ in rare cases. If
- a seed is not specified, one will be generated for you.
-
- suffix: A string of up to 64 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
-
- validation_file: The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the fine-tuning
- results file. The same data should not be present in both train and validation
- files.
-
- Your dataset must be formatted as a JSONL file. You must upload your file with
- the purpose `fine-tune`.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/fine_tuning/jobs",
- body=await async_maybe_transform(
- {
- "model": model,
- "training_file": training_file,
- "hyperparameters": hyperparameters,
- "integrations": integrations,
- "metadata": metadata,
- "method": method,
- "seed": seed,
- "suffix": suffix,
- "validation_file": validation_file,
- },
- job_create_params.JobCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- async def retrieve(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Get info about a fine-tuning job.
-
- [Learn more about fine-tuning](/docs/guides/fine-tuning)
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._get(
- f"/fine_tuning/jobs/{fine_tuning_job_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> JobListResponse:
- """
- List your organization's fine-tuning jobs
-
- Args:
- after: Identifier for the last job from the previous pagination request.
-
- limit: Number of fine-tuning jobs to retrieve.
-
- metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
- Alternatively, set `metadata=null` to indicate no metadata.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/fine_tuning/jobs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "metadata": metadata,
- },
- job_list_params.JobListParams,
- ),
- ),
- cast_to=JobListResponse,
- )
-
- async def cancel(
- self,
- fine_tuning_job_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTuningJob:
- """
- Immediately cancel a fine-tune job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not fine_tuning_job_id:
- raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
- return await self._post(
- f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTuningJob,
- )
-
-
-class JobsResourceWithRawResponse:
- def __init__(self, jobs: JobsResource) -> None:
- self._jobs = jobs
-
- self.create = to_raw_response_wrapper(
- jobs.create,
- )
- self.retrieve = to_raw_response_wrapper(
- jobs.retrieve,
- )
- self.list = to_raw_response_wrapper(
- jobs.list,
- )
- self.cancel = to_raw_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithRawResponse:
- return CheckpointsResourceWithRawResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> EventsResourceWithRawResponse:
- return EventsResourceWithRawResponse(self._jobs.events)
-
-
-class AsyncJobsResourceWithRawResponse:
- def __init__(self, jobs: AsyncJobsResource) -> None:
- self._jobs = jobs
-
- self.create = async_to_raw_response_wrapper(
- jobs.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- jobs.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- jobs.list,
- )
- self.cancel = async_to_raw_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse:
- return AsyncCheckpointsResourceWithRawResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> AsyncEventsResourceWithRawResponse:
- return AsyncEventsResourceWithRawResponse(self._jobs.events)
-
-
-class JobsResourceWithStreamingResponse:
- def __init__(self, jobs: JobsResource) -> None:
- self._jobs = jobs
-
- self.create = to_streamed_response_wrapper(
- jobs.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- jobs.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- jobs.list,
- )
- self.cancel = to_streamed_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> CheckpointsResourceWithStreamingResponse:
- return CheckpointsResourceWithStreamingResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> EventsResourceWithStreamingResponse:
- return EventsResourceWithStreamingResponse(self._jobs.events)
-
-
-class AsyncJobsResourceWithStreamingResponse:
- def __init__(self, jobs: AsyncJobsResource) -> None:
- self._jobs = jobs
-
- self.create = async_to_streamed_response_wrapper(
- jobs.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- jobs.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- jobs.list,
- )
- self.cancel = async_to_streamed_response_wrapper(
- jobs.cancel,
- )
-
- @cached_property
- def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse:
- return AsyncCheckpointsResourceWithStreamingResponse(self._jobs.checkpoints)
-
- @cached_property
- def events(self) -> AsyncEventsResourceWithStreamingResponse:
- return AsyncEventsResourceWithStreamingResponse(self._jobs.events)
diff --git a/src/digitalocean_genai_sdk/resources/images.py b/src/digitalocean_genai_sdk/resources/images.py
deleted file mode 100644
index 56a52184..00000000
--- a/src/digitalocean_genai_sdk/resources/images.py
+++ /dev/null
@@ -1,592 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Mapping, Optional, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import image_create_edit_params, image_create_variation_params, image_create_generation_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.images_response import ImagesResponse
-
-__all__ = ["ImagesResource", "AsyncImagesResource"]
-
-
-class ImagesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ImagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ImagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ImagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ImagesResourceWithStreamingResponse(self)
-
- def create_edit(
- self,
- *,
- image: FileTypes,
- prompt: str,
- mask: FileTypes | NotGiven = NOT_GIVEN,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an edited or extended image given an original image and a prompt.
-
- Args:
- image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
- is not provided, image must have transparency, which will be used as the mask.
-
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters.
-
- mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. Must be a valid PNG file, less than
- 4MB, and have the same dimensions as `image`.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "prompt": prompt,
- "mask": mask,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/images/edits",
- body=maybe_transform(body, image_create_edit_params.ImageCreateEditParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- def create_generation(
- self,
- *,
- prompt: str,
- model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
- style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an image given a prompt.
-
- Args:
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2` and 4000 characters for `dall-e-3`.
-
- model: The model to use for image generation.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- quality: The quality of the image that will be generated. `hd` creates images with finer
- details and greater consistency across the image. This param is only supported
- for `dall-e-3`.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
- `1024x1792` for `dall-e-3` models.
-
- style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
- causes the model to lean towards generating hyper-real and dramatic images.
- Natural causes the model to produce more natural, less hyper-real looking
- images. This param is only supported for `dall-e-3`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/images/generations",
- body=maybe_transform(
- {
- "prompt": prompt,
- "model": model,
- "n": n,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "style": style,
- "user": user,
- },
- image_create_generation_params.ImageCreateGenerationParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- def create_variation(
- self,
- *,
- image: FileTypes,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates a variation of a given image.
-
- Args:
- image: The image to use as the basis for the variation(s). Must be a valid PNG file,
- less than 4MB, and square.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/images/variations",
- body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
-
-class AsyncImagesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncImagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncImagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncImagesResourceWithStreamingResponse(self)
-
- async def create_edit(
- self,
- *,
- image: FileTypes,
- prompt: str,
- mask: FileTypes | NotGiven = NOT_GIVEN,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an edited or extended image given an original image and a prompt.
-
- Args:
- image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
- is not provided, image must have transparency, which will be used as the mask.
-
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters.
-
- mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. Must be a valid PNG file, less than
- 4MB, and have the same dimensions as `image`.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "prompt": prompt,
- "mask": mask,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/edits",
- body=await async_maybe_transform(body, image_create_edit_params.ImageCreateEditParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- async def create_generation(
- self,
- *,
- prompt: str,
- model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
- style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates an image given a prompt.
-
- Args:
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2` and 4000 characters for `dall-e-3`.
-
- model: The model to use for image generation.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- quality: The quality of the image that will be generated. `hd` creates images with finer
- details and greater consistency across the image. This param is only supported
- for `dall-e-3`.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
- `1024x1792` for `dall-e-3` models.
-
- style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
- causes the model to lean towards generating hyper-real and dramatic images.
- Natural causes the model to produce more natural, less hyper-real looking
- images. This param is only supported for `dall-e-3`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/images/generations",
- body=await async_maybe_transform(
- {
- "prompt": prompt,
- "model": model,
- "n": n,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "style": style,
- "user": user,
- },
- image_create_generation_params.ImageCreateGenerationParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
- async def create_variation(
- self,
- *,
- image: FileTypes,
- model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """
- Creates a variation of a given image.
-
- Args:
- image: The image to use as the basis for the variation(s). Must be a valid PNG file,
- less than 4MB, and square.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/variations",
- body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
-
-class ImagesResourceWithRawResponse:
- def __init__(self, images: ImagesResource) -> None:
- self._images = images
-
- self.create_edit = to_raw_response_wrapper(
- images.create_edit,
- )
- self.create_generation = to_raw_response_wrapper(
- images.create_generation,
- )
- self.create_variation = to_raw_response_wrapper(
- images.create_variation,
- )
-
-
-class AsyncImagesResourceWithRawResponse:
- def __init__(self, images: AsyncImagesResource) -> None:
- self._images = images
-
- self.create_edit = async_to_raw_response_wrapper(
- images.create_edit,
- )
- self.create_generation = async_to_raw_response_wrapper(
- images.create_generation,
- )
- self.create_variation = async_to_raw_response_wrapper(
- images.create_variation,
- )
-
-
-class ImagesResourceWithStreamingResponse:
- def __init__(self, images: ImagesResource) -> None:
- self._images = images
-
- self.create_edit = to_streamed_response_wrapper(
- images.create_edit,
- )
- self.create_generation = to_streamed_response_wrapper(
- images.create_generation,
- )
- self.create_variation = to_streamed_response_wrapper(
- images.create_variation,
- )
-
-
-class AsyncImagesResourceWithStreamingResponse:
- def __init__(self, images: AsyncImagesResource) -> None:
- self._images = images
-
- self.create_edit = async_to_streamed_response_wrapper(
- images.create_edit,
- )
- self.create_generation = async_to_streamed_response_wrapper(
- images.create_generation,
- )
- self.create_variation = async_to_streamed_response_wrapper(
- images.create_variation,
- )
diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/digitalocean_genai_sdk/resources/models.py
deleted file mode 100644
index 53775057..00000000
--- a/src/digitalocean_genai_sdk/resources/models.py
+++ /dev/null
@@ -1,305 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..types.model import Model
-from .._base_client import make_request_options
-from ..types.model_list_response import ModelListResponse
-from ..types.model_delete_response import ModelDeleteResponse
-
-__all__ = ["ModelsResource", "AsyncModelsResource"]
-
-
-class ModelsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ModelsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ModelsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ModelsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Model:
- """
- Retrieves a model instance, providing basic information about the model such as
- the owner and permissioning.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return self._get(
- f"/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Model,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
- """
- Lists the currently available models, and provides basic information about each
- one such as the owner and availability.
- """
- return self._get(
- "/models",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelListResponse,
- )
-
- def delete(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelDeleteResponse:
- """Delete a fine-tuned model.
-
- You must have the Owner role in your organization to
- delete a model.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return self._delete(
- f"/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelDeleteResponse,
- )
-
-
-class AsyncModelsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncModelsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncModelsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Model:
- """
- Retrieves a model instance, providing basic information about the model such as
- the owner and permissioning.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return await self._get(
- f"/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Model,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
- """
- Lists the currently available models, and provides basic information about each
- one such as the owner and availability.
- """
- return await self._get(
- "/models",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelListResponse,
- )
-
- async def delete(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelDeleteResponse:
- """Delete a fine-tuned model.
-
- You must have the Owner role in your organization to
- delete a model.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return await self._delete(
- f"/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelDeleteResponse,
- )
-
-
-class ModelsResourceWithRawResponse:
- def __init__(self, models: ModelsResource) -> None:
- self._models = models
-
- self.retrieve = to_raw_response_wrapper(
- models.retrieve,
- )
- self.list = to_raw_response_wrapper(
- models.list,
- )
- self.delete = to_raw_response_wrapper(
- models.delete,
- )
-
-
-class AsyncModelsResourceWithRawResponse:
- def __init__(self, models: AsyncModelsResource) -> None:
- self._models = models
-
- self.retrieve = async_to_raw_response_wrapper(
- models.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- models.list,
- )
- self.delete = async_to_raw_response_wrapper(
- models.delete,
- )
-
-
-class ModelsResourceWithStreamingResponse:
- def __init__(self, models: ModelsResource) -> None:
- self._models = models
-
- self.retrieve = to_streamed_response_wrapper(
- models.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- models.list,
- )
- self.delete = to_streamed_response_wrapper(
- models.delete,
- )
-
-
-class AsyncModelsResourceWithStreamingResponse:
- def __init__(self, models: AsyncModelsResource) -> None:
- self._models = models
-
- self.retrieve = async_to_streamed_response_wrapper(
- models.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- models.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- models.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/moderations.py b/src/digitalocean_genai_sdk/resources/moderations.py
deleted file mode 100644
index e9404243..00000000
--- a/src/digitalocean_genai_sdk/resources/moderations.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import moderation_classify_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.moderation_classify_response import ModerationClassifyResponse
-
-__all__ = ["ModerationsResource", "AsyncModerationsResource"]
-
-
-class ModerationsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ModerationsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ModerationsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ModerationsResourceWithStreamingResponse(self)
-
- def classify(
- self,
- *,
- input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]],
- model: Union[
- str,
- Literal[
- "omni-moderation-latest",
- "omni-moderation-2024-09-26",
- "text-moderation-latest",
- "text-moderation-stable",
- ],
- ]
- | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModerationClassifyResponse:
- """Classifies if text and/or image inputs are potentially harmful.
-
- Learn more in
- the [moderation guide](/docs/guides/moderation).
-
- Args:
- input: Input (or inputs) to classify. Can be a single string, an array of strings, or
- an array of multi-modal input objects similar to other models.
-
- model: The content moderation model you would like to use. Learn more in
- [the moderation guide](/docs/guides/moderation), and learn about available
- models [here](/docs/models#moderation).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/moderations",
- body=maybe_transform(
- {
- "input": input,
- "model": model,
- },
- moderation_classify_params.ModerationClassifyParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModerationClassifyResponse,
- )
-
-
-class AsyncModerationsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncModerationsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncModerationsResourceWithStreamingResponse(self)
-
- async def classify(
- self,
- *,
- input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]],
- model: Union[
- str,
- Literal[
- "omni-moderation-latest",
- "omni-moderation-2024-09-26",
- "text-moderation-latest",
- "text-moderation-stable",
- ],
- ]
- | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModerationClassifyResponse:
- """Classifies if text and/or image inputs are potentially harmful.
-
- Learn more in
- the [moderation guide](/docs/guides/moderation).
-
- Args:
- input: Input (or inputs) to classify. Can be a single string, an array of strings, or
- an array of multi-modal input objects similar to other models.
-
- model: The content moderation model you would like to use. Learn more in
- [the moderation guide](/docs/guides/moderation), and learn about available
- models [here](/docs/models#moderation).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/moderations",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- },
- moderation_classify_params.ModerationClassifyParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModerationClassifyResponse,
- )
-
-
-class ModerationsResourceWithRawResponse:
- def __init__(self, moderations: ModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = to_raw_response_wrapper(
- moderations.classify,
- )
-
-
-class AsyncModerationsResourceWithRawResponse:
- def __init__(self, moderations: AsyncModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = async_to_raw_response_wrapper(
- moderations.classify,
- )
-
-
-class ModerationsResourceWithStreamingResponse:
- def __init__(self, moderations: ModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = to_streamed_response_wrapper(
- moderations.classify,
- )
-
-
-class AsyncModerationsResourceWithStreamingResponse:
- def __init__(self, moderations: AsyncModerationsResource) -> None:
- self._moderations = moderations
-
- self.classify = async_to_streamed_response_wrapper(
- moderations.classify,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/__init__.py b/src/digitalocean_genai_sdk/resources/organization/__init__.py
deleted file mode 100644
index cf206d71..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/__init__.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .usage import (
- UsageResource,
- AsyncUsageResource,
- UsageResourceWithRawResponse,
- AsyncUsageResourceWithRawResponse,
- UsageResourceWithStreamingResponse,
- AsyncUsageResourceWithStreamingResponse,
-)
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from .invites import (
- InvitesResource,
- AsyncInvitesResource,
- InvitesResourceWithRawResponse,
- AsyncInvitesResourceWithRawResponse,
- InvitesResourceWithStreamingResponse,
- AsyncInvitesResourceWithStreamingResponse,
-)
-from .projects import (
- ProjectsResource,
- AsyncProjectsResource,
- ProjectsResourceWithRawResponse,
- AsyncProjectsResourceWithRawResponse,
- ProjectsResourceWithStreamingResponse,
- AsyncProjectsResourceWithStreamingResponse,
-)
-from .organization import (
- OrganizationResource,
- AsyncOrganizationResource,
- OrganizationResourceWithRawResponse,
- AsyncOrganizationResourceWithRawResponse,
- OrganizationResourceWithStreamingResponse,
- AsyncOrganizationResourceWithStreamingResponse,
-)
-from .admin_api_keys import (
- AdminAPIKeysResource,
- AsyncAdminAPIKeysResource,
- AdminAPIKeysResourceWithRawResponse,
- AsyncAdminAPIKeysResourceWithRawResponse,
- AdminAPIKeysResourceWithStreamingResponse,
- AsyncAdminAPIKeysResourceWithStreamingResponse,
-)
-
-__all__ = [
- "AdminAPIKeysResource",
- "AsyncAdminAPIKeysResource",
- "AdminAPIKeysResourceWithRawResponse",
- "AsyncAdminAPIKeysResourceWithRawResponse",
- "AdminAPIKeysResourceWithStreamingResponse",
- "AsyncAdminAPIKeysResourceWithStreamingResponse",
- "InvitesResource",
- "AsyncInvitesResource",
- "InvitesResourceWithRawResponse",
- "AsyncInvitesResourceWithRawResponse",
- "InvitesResourceWithStreamingResponse",
- "AsyncInvitesResourceWithStreamingResponse",
- "ProjectsResource",
- "AsyncProjectsResource",
- "ProjectsResourceWithRawResponse",
- "AsyncProjectsResourceWithRawResponse",
- "ProjectsResourceWithStreamingResponse",
- "AsyncProjectsResourceWithStreamingResponse",
- "UsageResource",
- "AsyncUsageResource",
- "UsageResourceWithRawResponse",
- "AsyncUsageResourceWithRawResponse",
- "UsageResourceWithStreamingResponse",
- "AsyncUsageResourceWithStreamingResponse",
- "UsersResource",
- "AsyncUsersResource",
- "UsersResourceWithRawResponse",
- "AsyncUsersResourceWithRawResponse",
- "UsersResourceWithStreamingResponse",
- "AsyncUsersResourceWithStreamingResponse",
- "OrganizationResource",
- "AsyncOrganizationResource",
- "OrganizationResourceWithRawResponse",
- "AsyncOrganizationResourceWithRawResponse",
- "OrganizationResourceWithStreamingResponse",
- "AsyncOrganizationResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py b/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py
deleted file mode 100644
index 7224871f..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py
+++ /dev/null
@@ -1,444 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import admin_api_key_list_params, admin_api_key_create_params
-from ...types.organization.admin_api_key import AdminAPIKey
-from ...types.organization.admin_api_key_list_response import AdminAPIKeyListResponse
-from ...types.organization.admin_api_key_delete_response import AdminAPIKeyDeleteResponse
-
-__all__ = ["AdminAPIKeysResource", "AsyncAdminAPIKeysResource"]
-
-
-class AdminAPIKeysResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AdminAPIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AdminAPIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AdminAPIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AdminAPIKeysResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Create a new admin-level API key for the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/organization/admin_api_keys",
- body=maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- def retrieve(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Get details for a specific organization API key by its ID.
-
- Args:
- key_id: The ID of the API key.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._get(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- def list(
- self,
- *,
- after: Optional[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyListResponse:
- """
- Retrieve a paginated list of organization admin API keys.
-
- Args:
- after: Return keys with IDs that come after this ID in the pagination order.
-
- limit: Maximum number of keys to return.
-
- order: Order results by creation time, ascending or descending.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/admin_api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- admin_api_key_list_params.AdminAPIKeyListParams,
- ),
- ),
- cast_to=AdminAPIKeyListResponse,
- )
-
- def delete(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyDeleteResponse:
- """
- Delete the specified admin API key.
-
- Args:
- key_id: The ID of the API key to be deleted.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._delete(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKeyDeleteResponse,
- )
-
-
-class AsyncAdminAPIKeysResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAdminAPIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAdminAPIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAdminAPIKeysResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Create a new admin-level API key for the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/organization/admin_api_keys",
- body=await async_maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- async def retrieve(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKey:
- """
- Get details for a specific organization API key by its ID.
-
- Args:
- key_id: The ID of the API key.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._get(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKey,
- )
-
- async def list(
- self,
- *,
- after: Optional[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyListResponse:
- """
- Retrieve a paginated list of organization admin API keys.
-
- Args:
- after: Return keys with IDs that come after this ID in the pagination order.
-
- limit: Maximum number of keys to return.
-
- order: Order results by creation time, ascending or descending.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/admin_api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- "order": order,
- },
- admin_api_key_list_params.AdminAPIKeyListParams,
- ),
- ),
- cast_to=AdminAPIKeyListResponse,
- )
-
- async def delete(
- self,
- key_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AdminAPIKeyDeleteResponse:
- """
- Delete the specified admin API key.
-
- Args:
- key_id: The ID of the API key to be deleted.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._delete(
- f"/organization/admin_api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AdminAPIKeyDeleteResponse,
- )
-
-
-class AdminAPIKeysResourceWithRawResponse:
- def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = to_raw_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = to_raw_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = to_raw_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = to_raw_response_wrapper(
- admin_api_keys.delete,
- )
-
-
-class AsyncAdminAPIKeysResourceWithRawResponse:
- def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = async_to_raw_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = async_to_raw_response_wrapper(
- admin_api_keys.delete,
- )
-
-
-class AdminAPIKeysResourceWithStreamingResponse:
- def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = to_streamed_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = to_streamed_response_wrapper(
- admin_api_keys.delete,
- )
-
-
-class AsyncAdminAPIKeysResourceWithStreamingResponse:
- def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None:
- self._admin_api_keys = admin_api_keys
-
- self.create = async_to_streamed_response_wrapper(
- admin_api_keys.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- admin_api_keys.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- admin_api_keys.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- admin_api_keys.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/invites.py b/src/digitalocean_genai_sdk/resources/organization/invites.py
deleted file mode 100644
index 16bd17bc..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/invites.py
+++ /dev/null
@@ -1,476 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import invite_list_params, invite_create_params
-from ...types.organization.invite import Invite
-from ...types.organization.invite_list_response import InviteListResponse
-from ...types.organization.invite_delete_response import InviteDeleteResponse
-
-__all__ = ["InvitesResource", "AsyncInvitesResource"]
-
-
-class InvitesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> InvitesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return InvitesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> InvitesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return InvitesResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- email: str,
- role: Literal["reader", "owner"],
- projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """Create an invite for a user to the organization.
-
- The invite must be accepted by
- the user before they have access to the organization.
-
- Args:
- email: Send an email to this address
-
- role: `owner` or `reader`
-
- projects: An array of projects to which membership is granted at the same time the org
- invite is accepted. If omitted, the user will be invited to the default project
- for compatibility with legacy behavior.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/organization/invites",
- body=maybe_transform(
- {
- "email": email,
- "role": role,
- "projects": projects,
- },
- invite_create_params.InviteCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- def retrieve(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """
- Retrieves an invite.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return self._get(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteListResponse:
- """
- Returns a list of invites in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/invites",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- invite_list_params.InviteListParams,
- ),
- ),
- cast_to=InviteListResponse,
- )
-
- def delete(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteDeleteResponse:
- """Delete an invite.
-
- If the invite has already been accepted, it cannot be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return self._delete(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=InviteDeleteResponse,
- )
-
-
-class AsyncInvitesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncInvitesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncInvitesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncInvitesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncInvitesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- email: str,
- role: Literal["reader", "owner"],
- projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """Create an invite for a user to the organization.
-
- The invite must be accepted by
- the user before they have access to the organization.
-
- Args:
- email: Send an email to this address
-
- role: `owner` or `reader`
-
- projects: An array of projects to which membership is granted at the same time the org
- invite is accepted. If omitted, the user will be invited to the default project
- for compatibility with legacy behavior.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/organization/invites",
- body=await async_maybe_transform(
- {
- "email": email,
- "role": role,
- "projects": projects,
- },
- invite_create_params.InviteCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- async def retrieve(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Invite:
- """
- Retrieves an invite.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return await self._get(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Invite,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteListResponse:
- """
- Returns a list of invites in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/invites",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- invite_list_params.InviteListParams,
- ),
- ),
- cast_to=InviteListResponse,
- )
-
- async def delete(
- self,
- invite_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> InviteDeleteResponse:
- """Delete an invite.
-
- If the invite has already been accepted, it cannot be deleted.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not invite_id:
- raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}")
- return await self._delete(
- f"/organization/invites/{invite_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=InviteDeleteResponse,
- )
-
-
-class InvitesResourceWithRawResponse:
- def __init__(self, invites: InvitesResource) -> None:
- self._invites = invites
-
- self.create = to_raw_response_wrapper(
- invites.create,
- )
- self.retrieve = to_raw_response_wrapper(
- invites.retrieve,
- )
- self.list = to_raw_response_wrapper(
- invites.list,
- )
- self.delete = to_raw_response_wrapper(
- invites.delete,
- )
-
-
-class AsyncInvitesResourceWithRawResponse:
- def __init__(self, invites: AsyncInvitesResource) -> None:
- self._invites = invites
-
- self.create = async_to_raw_response_wrapper(
- invites.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- invites.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- invites.list,
- )
- self.delete = async_to_raw_response_wrapper(
- invites.delete,
- )
-
-
-class InvitesResourceWithStreamingResponse:
- def __init__(self, invites: InvitesResource) -> None:
- self._invites = invites
-
- self.create = to_streamed_response_wrapper(
- invites.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- invites.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- invites.list,
- )
- self.delete = to_streamed_response_wrapper(
- invites.delete,
- )
-
-
-class AsyncInvitesResourceWithStreamingResponse:
- def __init__(self, invites: AsyncInvitesResource) -> None:
- self._invites = invites
-
- self.create = async_to_streamed_response_wrapper(
- invites.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- invites.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- invites.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- invites.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/organization.py b/src/digitalocean_genai_sdk/resources/organization/organization.py
deleted file mode 100644
index 4a9aa4fb..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/organization.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from .usage import (
- UsageResource,
- AsyncUsageResource,
- UsageResourceWithRawResponse,
- AsyncUsageResourceWithRawResponse,
- UsageResourceWithStreamingResponse,
- AsyncUsageResourceWithStreamingResponse,
-)
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from ...types import organization_get_costs_params, organization_list_audit_logs_params
-from .invites import (
- InvitesResource,
- AsyncInvitesResource,
- InvitesResourceWithRawResponse,
- AsyncInvitesResourceWithRawResponse,
- InvitesResourceWithStreamingResponse,
- AsyncInvitesResourceWithStreamingResponse,
-)
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from .admin_api_keys import (
- AdminAPIKeysResource,
- AsyncAdminAPIKeysResource,
- AdminAPIKeysResourceWithRawResponse,
- AsyncAdminAPIKeysResourceWithRawResponse,
- AdminAPIKeysResourceWithStreamingResponse,
- AsyncAdminAPIKeysResourceWithStreamingResponse,
-)
-from .projects.projects import (
- ProjectsResource,
- AsyncProjectsResource,
- ProjectsResourceWithRawResponse,
- AsyncProjectsResourceWithRawResponse,
- ProjectsResourceWithStreamingResponse,
- AsyncProjectsResourceWithStreamingResponse,
-)
-from ...types.usage_response import UsageResponse
-from ...types.audit_log_event_type import AuditLogEventType
-from ...types.organization_list_audit_logs_response import OrganizationListAuditLogsResponse
-
-__all__ = ["OrganizationResource", "AsyncOrganizationResource"]
-
-
-class OrganizationResource(SyncAPIResource):
- @cached_property
- def admin_api_keys(self) -> AdminAPIKeysResource:
- return AdminAPIKeysResource(self._client)
-
- @cached_property
- def invites(self) -> InvitesResource:
- return InvitesResource(self._client)
-
- @cached_property
- def projects(self) -> ProjectsResource:
- return ProjectsResource(self._client)
-
- @cached_property
- def usage(self) -> UsageResource:
- return UsageResource(self._client)
-
- @cached_property
- def users(self) -> UsersResource:
- return UsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> OrganizationResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return OrganizationResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> OrganizationResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return OrganizationResourceWithStreamingResponse(self)
-
- def get_costs(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get costs details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently only `1d` is supported, default
- to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the costs by the specified fields. Support fields include `project_id`,
- `line_item` and any combination of them.
-
- limit: A limit on the number of buckets to be returned. Limit can range between 1 and
- 180, and the default is 7.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only costs for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/costs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- organization_get_costs_params.OrganizationGetCostsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def list_audit_logs(
- self,
- *,
- actor_emails: List[str] | NotGiven = NOT_GIVEN,
- actor_ids: List[str] | NotGiven = NOT_GIVEN,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN,
- event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- resource_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationListAuditLogsResponse:
- """
- List user actions and configuration changes within this organization.
-
- Args:
- actor_emails: Return only events performed by users with these emails.
-
- actor_ids: Return only events performed by these actors. Can be a user ID, a service
- account ID, or an api key tracking ID.
-
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- effective_at: Return only events whose `effective_at` (Unix seconds) is in this range.
-
- event_types: Return only events with a `type` in one of these values. For example,
- `project.created`. For all options, see the documentation for the
- [audit log object](/docs/api-reference/audit-logs/object).
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- project_ids: Return only events for these projects.
-
- resource_ids: Return only events performed on these targets. For example, a project ID
- updated.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/audit_logs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "actor_emails": actor_emails,
- "actor_ids": actor_ids,
- "after": after,
- "before": before,
- "effective_at": effective_at,
- "event_types": event_types,
- "limit": limit,
- "project_ids": project_ids,
- "resource_ids": resource_ids,
- },
- organization_list_audit_logs_params.OrganizationListAuditLogsParams,
- ),
- ),
- cast_to=OrganizationListAuditLogsResponse,
- )
-
-
-class AsyncOrganizationResource(AsyncAPIResource):
- @cached_property
- def admin_api_keys(self) -> AsyncAdminAPIKeysResource:
- return AsyncAdminAPIKeysResource(self._client)
-
- @cached_property
- def invites(self) -> AsyncInvitesResource:
- return AsyncInvitesResource(self._client)
-
- @cached_property
- def projects(self) -> AsyncProjectsResource:
- return AsyncProjectsResource(self._client)
-
- @cached_property
- def usage(self) -> AsyncUsageResource:
- return AsyncUsageResource(self._client)
-
- @cached_property
- def users(self) -> AsyncUsersResource:
- return AsyncUsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncOrganizationResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncOrganizationResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncOrganizationResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncOrganizationResourceWithStreamingResponse(self)
-
- async def get_costs(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get costs details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently only `1d` is supported, default
- to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the costs by the specified fields. Support fields include `project_id`,
- `line_item` and any combination of them.
-
- limit: A limit on the number of buckets to be returned. Limit can range between 1 and
- 180, and the default is 7.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only costs for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/costs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- organization_get_costs_params.OrganizationGetCostsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def list_audit_logs(
- self,
- *,
- actor_emails: List[str] | NotGiven = NOT_GIVEN,
- actor_ids: List[str] | NotGiven = NOT_GIVEN,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN,
- event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- resource_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationListAuditLogsResponse:
- """
- List user actions and configuration changes within this organization.
-
- Args:
- actor_emails: Return only events performed by users with these emails.
-
- actor_ids: Return only events performed by these actors. Can be a user ID, a service
- account ID, or an api key tracking ID.
-
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- effective_at: Return only events whose `effective_at` (Unix seconds) is in this range.
-
- event_types: Return only events with a `type` in one of these values. For example,
- `project.created`. For all options, see the documentation for the
- [audit log object](/docs/api-reference/audit-logs/object).
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- project_ids: Return only events for these projects.
-
- resource_ids: Return only events performed on these targets. For example, a project ID
- updated.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/audit_logs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "actor_emails": actor_emails,
- "actor_ids": actor_ids,
- "after": after,
- "before": before,
- "effective_at": effective_at,
- "event_types": event_types,
- "limit": limit,
- "project_ids": project_ids,
- "resource_ids": resource_ids,
- },
- organization_list_audit_logs_params.OrganizationListAuditLogsParams,
- ),
- ),
- cast_to=OrganizationListAuditLogsResponse,
- )
-
-
-class OrganizationResourceWithRawResponse:
- def __init__(self, organization: OrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = to_raw_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = to_raw_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AdminAPIKeysResourceWithRawResponse:
- return AdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> InvitesResourceWithRawResponse:
- return InvitesResourceWithRawResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> ProjectsResourceWithRawResponse:
- return ProjectsResourceWithRawResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> UsageResourceWithRawResponse:
- return UsageResourceWithRawResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> UsersResourceWithRawResponse:
- return UsersResourceWithRawResponse(self._organization.users)
-
-
-class AsyncOrganizationResourceWithRawResponse:
- def __init__(self, organization: AsyncOrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = async_to_raw_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = async_to_raw_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithRawResponse:
- return AsyncAdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> AsyncInvitesResourceWithRawResponse:
- return AsyncInvitesResourceWithRawResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> AsyncProjectsResourceWithRawResponse:
- return AsyncProjectsResourceWithRawResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> AsyncUsageResourceWithRawResponse:
- return AsyncUsageResourceWithRawResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithRawResponse:
- return AsyncUsersResourceWithRawResponse(self._organization.users)
-
-
-class OrganizationResourceWithStreamingResponse:
- def __init__(self, organization: OrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = to_streamed_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = to_streamed_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AdminAPIKeysResourceWithStreamingResponse:
- return AdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> InvitesResourceWithStreamingResponse:
- return InvitesResourceWithStreamingResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> ProjectsResourceWithStreamingResponse:
- return ProjectsResourceWithStreamingResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> UsageResourceWithStreamingResponse:
- return UsageResourceWithStreamingResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> UsersResourceWithStreamingResponse:
- return UsersResourceWithStreamingResponse(self._organization.users)
-
-
-class AsyncOrganizationResourceWithStreamingResponse:
- def __init__(self, organization: AsyncOrganizationResource) -> None:
- self._organization = organization
-
- self.get_costs = async_to_streamed_response_wrapper(
- organization.get_costs,
- )
- self.list_audit_logs = async_to_streamed_response_wrapper(
- organization.list_audit_logs,
- )
-
- @cached_property
- def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse:
- return AsyncAdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys)
-
- @cached_property
- def invites(self) -> AsyncInvitesResourceWithStreamingResponse:
- return AsyncInvitesResourceWithStreamingResponse(self._organization.invites)
-
- @cached_property
- def projects(self) -> AsyncProjectsResourceWithStreamingResponse:
- return AsyncProjectsResourceWithStreamingResponse(self._organization.projects)
-
- @cached_property
- def usage(self) -> AsyncUsageResourceWithStreamingResponse:
- return AsyncUsageResourceWithStreamingResponse(self._organization.usage)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithStreamingResponse:
- return AsyncUsersResourceWithStreamingResponse(self._organization.users)
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py b/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py
deleted file mode 100644
index f3ceec3b..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from .api_keys import (
- APIKeysResource,
- AsyncAPIKeysResource,
- APIKeysResourceWithRawResponse,
- AsyncAPIKeysResourceWithRawResponse,
- APIKeysResourceWithStreamingResponse,
- AsyncAPIKeysResourceWithStreamingResponse,
-)
-from .projects import (
- ProjectsResource,
- AsyncProjectsResource,
- ProjectsResourceWithRawResponse,
- AsyncProjectsResourceWithRawResponse,
- ProjectsResourceWithStreamingResponse,
- AsyncProjectsResourceWithStreamingResponse,
-)
-from .rate_limits import (
- RateLimitsResource,
- AsyncRateLimitsResource,
- RateLimitsResourceWithRawResponse,
- AsyncRateLimitsResourceWithRawResponse,
- RateLimitsResourceWithStreamingResponse,
- AsyncRateLimitsResourceWithStreamingResponse,
-)
-from .service_accounts import (
- ServiceAccountsResource,
- AsyncServiceAccountsResource,
- ServiceAccountsResourceWithRawResponse,
- AsyncServiceAccountsResourceWithRawResponse,
- ServiceAccountsResourceWithStreamingResponse,
- AsyncServiceAccountsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "APIKeysResource",
- "AsyncAPIKeysResource",
- "APIKeysResourceWithRawResponse",
- "AsyncAPIKeysResourceWithRawResponse",
- "APIKeysResourceWithStreamingResponse",
- "AsyncAPIKeysResourceWithStreamingResponse",
- "RateLimitsResource",
- "AsyncRateLimitsResource",
- "RateLimitsResourceWithRawResponse",
- "AsyncRateLimitsResourceWithRawResponse",
- "RateLimitsResourceWithStreamingResponse",
- "AsyncRateLimitsResourceWithStreamingResponse",
- "ServiceAccountsResource",
- "AsyncServiceAccountsResource",
- "ServiceAccountsResourceWithRawResponse",
- "AsyncServiceAccountsResourceWithRawResponse",
- "ServiceAccountsResourceWithStreamingResponse",
- "AsyncServiceAccountsResourceWithStreamingResponse",
- "UsersResource",
- "AsyncUsersResource",
- "UsersResourceWithRawResponse",
- "AsyncUsersResourceWithRawResponse",
- "UsersResourceWithStreamingResponse",
- "AsyncUsersResourceWithStreamingResponse",
- "ProjectsResource",
- "AsyncProjectsResource",
- "ProjectsResourceWithRawResponse",
- "AsyncProjectsResourceWithRawResponse",
- "ProjectsResourceWithStreamingResponse",
- "AsyncProjectsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py b/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py
deleted file mode 100644
index c5907765..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import api_key_list_params
-from ....types.organization.projects.api_key import APIKey
-from ....types.organization.projects.api_key_list_response import APIKeyListResponse
-from ....types.organization.projects.api_key_delete_response import APIKeyDeleteResponse
-
-__all__ = ["APIKeysResource", "AsyncAPIKeysResource"]
-
-
-class APIKeysResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> APIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return APIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return APIKeysResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKey:
- """
- Retrieves an API key in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKey,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyListResponse:
- """
- Returns a list of API keys in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- api_key_list_params.APIKeyListParams,
- ),
- ),
- cast_to=APIKeyListResponse,
- )
-
- def delete(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyDeleteResponse:
- """
- Deletes an API key from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return self._delete(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKeyDeleteResponse,
- )
-
-
-class AsyncAPIKeysResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAPIKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncAPIKeysResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKey:
- """
- Retrieves an API key in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKey,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyListResponse:
- """
- Returns a list of API keys in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/api_keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- api_key_list_params.APIKeyListParams,
- ),
- ),
- cast_to=APIKeyListResponse,
- )
-
- async def delete(
- self,
- key_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> APIKeyDeleteResponse:
- """
- Deletes an API key from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not key_id:
- raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}")
- return await self._delete(
- f"/organization/projects/{project_id}/api_keys/{key_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=APIKeyDeleteResponse,
- )
-
-
-class APIKeysResourceWithRawResponse:
- def __init__(self, api_keys: APIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = to_raw_response_wrapper(
- api_keys.retrieve,
- )
- self.list = to_raw_response_wrapper(
- api_keys.list,
- )
- self.delete = to_raw_response_wrapper(
- api_keys.delete,
- )
-
-
-class AsyncAPIKeysResourceWithRawResponse:
- def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = async_to_raw_response_wrapper(
- api_keys.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- api_keys.list,
- )
- self.delete = async_to_raw_response_wrapper(
- api_keys.delete,
- )
-
-
-class APIKeysResourceWithStreamingResponse:
- def __init__(self, api_keys: APIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = to_streamed_response_wrapper(
- api_keys.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- api_keys.list,
- )
- self.delete = to_streamed_response_wrapper(
- api_keys.delete,
- )
-
-
-class AsyncAPIKeysResourceWithStreamingResponse:
- def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
- self._api_keys = api_keys
-
- self.retrieve = async_to_streamed_response_wrapper(
- api_keys.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- api_keys.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- api_keys.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py b/src/digitalocean_genai_sdk/resources/organization/projects/projects.py
deleted file mode 100644
index 93e42de8..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py
+++ /dev/null
@@ -1,670 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from .users import (
- UsersResource,
- AsyncUsersResource,
- UsersResourceWithRawResponse,
- AsyncUsersResourceWithRawResponse,
- UsersResourceWithStreamingResponse,
- AsyncUsersResourceWithStreamingResponse,
-)
-from .api_keys import (
- APIKeysResource,
- AsyncAPIKeysResource,
- APIKeysResourceWithRawResponse,
- AsyncAPIKeysResourceWithRawResponse,
- APIKeysResourceWithStreamingResponse,
- AsyncAPIKeysResourceWithStreamingResponse,
-)
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from .rate_limits import (
- RateLimitsResource,
- AsyncRateLimitsResource,
- RateLimitsResourceWithRawResponse,
- AsyncRateLimitsResourceWithRawResponse,
- RateLimitsResourceWithStreamingResponse,
- AsyncRateLimitsResourceWithStreamingResponse,
-)
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from .service_accounts import (
- ServiceAccountsResource,
- AsyncServiceAccountsResource,
- ServiceAccountsResourceWithRawResponse,
- AsyncServiceAccountsResourceWithRawResponse,
- ServiceAccountsResourceWithStreamingResponse,
- AsyncServiceAccountsResourceWithStreamingResponse,
-)
-from ....types.organization import project_list_params, project_create_params, project_update_params
-from ....types.organization.project import Project
-from ....types.organization.project_list_response import ProjectListResponse
-
-__all__ = ["ProjectsResource", "AsyncProjectsResource"]
-
-
-class ProjectsResource(SyncAPIResource):
- @cached_property
- def api_keys(self) -> APIKeysResource:
- return APIKeysResource(self._client)
-
- @cached_property
- def rate_limits(self) -> RateLimitsResource:
- return RateLimitsResource(self._client)
-
- @cached_property
- def service_accounts(self) -> ServiceAccountsResource:
- return ServiceAccountsResource(self._client)
-
- @cached_property
- def users(self) -> UsersResource:
- return UsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ProjectsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ProjectsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ProjectsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ProjectsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Create a new project in the organization.
-
- Projects can be created and archived,
- but cannot be deleted.
-
- Args:
- name: The friendly name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/organization/projects",
- body=maybe_transform({"name": name}, project_create_params.ProjectCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- def retrieve(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Retrieves a project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- def update(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Modifies a project in the organization.
-
- Args:
- name: The updated name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}",
- body=maybe_transform({"name": name}, project_update_params.ProjectUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- include_archived: bool | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectListResponse:
- """Returns a list of projects.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- include_archived: If `true` returns all projects including those that have been `archived`.
- Archived projects are not included by default.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/projects",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "include_archived": include_archived,
- "limit": limit,
- },
- project_list_params.ProjectListParams,
- ),
- ),
- cast_to=ProjectListResponse,
- )
-
- def archive(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Archives a project in the organization.
-
- Archived projects cannot be used or
- updated.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/archive",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
-
-class AsyncProjectsResource(AsyncAPIResource):
- @cached_property
- def api_keys(self) -> AsyncAPIKeysResource:
- return AsyncAPIKeysResource(self._client)
-
- @cached_property
- def rate_limits(self) -> AsyncRateLimitsResource:
- return AsyncRateLimitsResource(self._client)
-
- @cached_property
- def service_accounts(self) -> AsyncServiceAccountsResource:
- return AsyncServiceAccountsResource(self._client)
-
- @cached_property
- def users(self) -> AsyncUsersResource:
- return AsyncUsersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncProjectsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncProjectsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncProjectsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncProjectsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Create a new project in the organization.
-
- Projects can be created and archived,
- but cannot be deleted.
-
- Args:
- name: The friendly name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/organization/projects",
- body=await async_maybe_transform({"name": name}, project_create_params.ProjectCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- async def retrieve(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Retrieves a project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- async def update(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """
- Modifies a project in the organization.
-
- Args:
- name: The updated name of the project, this name appears in reports.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}",
- body=await async_maybe_transform({"name": name}, project_update_params.ProjectUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- include_archived: bool | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectListResponse:
- """Returns a list of projects.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- include_archived: If `true` returns all projects including those that have been `archived`.
- Archived projects are not included by default.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/projects",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "include_archived": include_archived,
- "limit": limit,
- },
- project_list_params.ProjectListParams,
- ),
- ),
- cast_to=ProjectListResponse,
- )
-
- async def archive(
- self,
- project_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Project:
- """Archives a project in the organization.
-
- Archived projects cannot be used or
- updated.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/archive",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Project,
- )
-
-
-class ProjectsResourceWithRawResponse:
- def __init__(self, projects: ProjectsResource) -> None:
- self._projects = projects
-
- self.create = to_raw_response_wrapper(
- projects.create,
- )
- self.retrieve = to_raw_response_wrapper(
- projects.retrieve,
- )
- self.update = to_raw_response_wrapper(
- projects.update,
- )
- self.list = to_raw_response_wrapper(
- projects.list,
- )
- self.archive = to_raw_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> APIKeysResourceWithRawResponse:
- return APIKeysResourceWithRawResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> RateLimitsResourceWithRawResponse:
- return RateLimitsResourceWithRawResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> ServiceAccountsResourceWithRawResponse:
- return ServiceAccountsResourceWithRawResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> UsersResourceWithRawResponse:
- return UsersResourceWithRawResponse(self._projects.users)
-
-
-class AsyncProjectsResourceWithRawResponse:
- def __init__(self, projects: AsyncProjectsResource) -> None:
- self._projects = projects
-
- self.create = async_to_raw_response_wrapper(
- projects.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- projects.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- projects.update,
- )
- self.list = async_to_raw_response_wrapper(
- projects.list,
- )
- self.archive = async_to_raw_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
- return AsyncAPIKeysResourceWithRawResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> AsyncRateLimitsResourceWithRawResponse:
- return AsyncRateLimitsResourceWithRawResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> AsyncServiceAccountsResourceWithRawResponse:
- return AsyncServiceAccountsResourceWithRawResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithRawResponse:
- return AsyncUsersResourceWithRawResponse(self._projects.users)
-
-
-class ProjectsResourceWithStreamingResponse:
- def __init__(self, projects: ProjectsResource) -> None:
- self._projects = projects
-
- self.create = to_streamed_response_wrapper(
- projects.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- projects.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- projects.update,
- )
- self.list = to_streamed_response_wrapper(
- projects.list,
- )
- self.archive = to_streamed_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> APIKeysResourceWithStreamingResponse:
- return APIKeysResourceWithStreamingResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> RateLimitsResourceWithStreamingResponse:
- return RateLimitsResourceWithStreamingResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> ServiceAccountsResourceWithStreamingResponse:
- return ServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> UsersResourceWithStreamingResponse:
- return UsersResourceWithStreamingResponse(self._projects.users)
-
-
-class AsyncProjectsResourceWithStreamingResponse:
- def __init__(self, projects: AsyncProjectsResource) -> None:
- self._projects = projects
-
- self.create = async_to_streamed_response_wrapper(
- projects.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- projects.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- projects.update,
- )
- self.list = async_to_streamed_response_wrapper(
- projects.list,
- )
- self.archive = async_to_streamed_response_wrapper(
- projects.archive,
- )
-
- @cached_property
- def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse:
- return AsyncAPIKeysResourceWithStreamingResponse(self._projects.api_keys)
-
- @cached_property
- def rate_limits(self) -> AsyncRateLimitsResourceWithStreamingResponse:
- return AsyncRateLimitsResourceWithStreamingResponse(self._projects.rate_limits)
-
- @cached_property
- def service_accounts(self) -> AsyncServiceAccountsResourceWithStreamingResponse:
- return AsyncServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts)
-
- @cached_property
- def users(self) -> AsyncUsersResourceWithStreamingResponse:
- return AsyncUsersResourceWithStreamingResponse(self._projects.users)
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py b/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py
deleted file mode 100644
index 9c9dce7b..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py
+++ /dev/null
@@ -1,360 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import rate_limit_list_params, rate_limit_update_params
-from ....types.organization.projects.rate_limit import RateLimit
-from ....types.organization.projects.rate_limit_list_response import RateLimitListResponse
-
-__all__ = ["RateLimitsResource", "AsyncRateLimitsResource"]
-
-
-class RateLimitsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> RateLimitsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return RateLimitsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RateLimitsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return RateLimitsResourceWithStreamingResponse(self)
-
- def update(
- self,
- rate_limit_id: str,
- *,
- project_id: str,
- batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN,
- max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_images_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_day: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimit:
- """
- Updates a project rate limit.
-
- Args:
- batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models.
-
- max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models.
-
- max_images_per_1_minute: The maximum images per minute. Only relevant for certain models.
-
- max_requests_per_1_day: The maximum requests per day. Only relevant for certain models.
-
- max_requests_per_1_minute: The maximum requests per minute.
-
- max_tokens_per_1_minute: The maximum tokens per minute.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not rate_limit_id:
- raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}",
- body=maybe_transform(
- {
- "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens,
- "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute,
- "max_images_per_1_minute": max_images_per_1_minute,
- "max_requests_per_1_day": max_requests_per_1_day,
- "max_requests_per_1_minute": max_requests_per_1_minute,
- "max_tokens_per_1_minute": max_tokens_per_1_minute,
- },
- rate_limit_update_params.RateLimitUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RateLimit,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimitListResponse:
- """
- Returns the rate limits per model for a project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- beginning with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. The default is 100.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/rate_limits",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- },
- rate_limit_list_params.RateLimitListParams,
- ),
- ),
- cast_to=RateLimitListResponse,
- )
-
-
-class AsyncRateLimitsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncRateLimitsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRateLimitsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRateLimitsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncRateLimitsResourceWithStreamingResponse(self)
-
- async def update(
- self,
- rate_limit_id: str,
- *,
- project_id: str,
- batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN,
- max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_images_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_day: int | NotGiven = NOT_GIVEN,
- max_requests_per_1_minute: int | NotGiven = NOT_GIVEN,
- max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimit:
- """
- Updates a project rate limit.
-
- Args:
- batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models.
-
- max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models.
-
- max_images_per_1_minute: The maximum images per minute. Only relevant for certain models.
-
- max_requests_per_1_day: The maximum requests per day. Only relevant for certain models.
-
- max_requests_per_1_minute: The maximum requests per minute.
-
- max_tokens_per_1_minute: The maximum tokens per minute.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not rate_limit_id:
- raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}",
- body=await async_maybe_transform(
- {
- "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens,
- "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute,
- "max_images_per_1_minute": max_images_per_1_minute,
- "max_requests_per_1_day": max_requests_per_1_day,
- "max_requests_per_1_minute": max_requests_per_1_minute,
- "max_tokens_per_1_minute": max_tokens_per_1_minute,
- },
- rate_limit_update_params.RateLimitUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RateLimit,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RateLimitListResponse:
- """
- Returns the rate limits per model for a project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- beginning with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. The default is 100.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/rate_limits",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- },
- rate_limit_list_params.RateLimitListParams,
- ),
- ),
- cast_to=RateLimitListResponse,
- )
-
-
-class RateLimitsResourceWithRawResponse:
- def __init__(self, rate_limits: RateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = to_raw_response_wrapper(
- rate_limits.update,
- )
- self.list = to_raw_response_wrapper(
- rate_limits.list,
- )
-
-
-class AsyncRateLimitsResourceWithRawResponse:
- def __init__(self, rate_limits: AsyncRateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = async_to_raw_response_wrapper(
- rate_limits.update,
- )
- self.list = async_to_raw_response_wrapper(
- rate_limits.list,
- )
-
-
-class RateLimitsResourceWithStreamingResponse:
- def __init__(self, rate_limits: RateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = to_streamed_response_wrapper(
- rate_limits.update,
- )
- self.list = to_streamed_response_wrapper(
- rate_limits.list,
- )
-
-
-class AsyncRateLimitsResourceWithStreamingResponse:
- def __init__(self, rate_limits: AsyncRateLimitsResource) -> None:
- self._rate_limits = rate_limits
-
- self.update = async_to_streamed_response_wrapper(
- rate_limits.update,
- )
- self.list = async_to_streamed_response_wrapper(
- rate_limits.list,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py b/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py
deleted file mode 100644
index 8957a81d..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py
+++ /dev/null
@@ -1,466 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import service_account_list_params, service_account_create_params
-from ....types.organization.projects.service_account import ServiceAccount
-from ....types.organization.projects.service_account_list_response import ServiceAccountListResponse
-from ....types.organization.projects.service_account_create_response import ServiceAccountCreateResponse
-from ....types.organization.projects.service_account_delete_response import ServiceAccountDeleteResponse
-
-__all__ = ["ServiceAccountsResource", "AsyncServiceAccountsResource"]
-
-
-class ServiceAccountsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ServiceAccountsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ServiceAccountsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ServiceAccountsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ServiceAccountsResourceWithStreamingResponse(self)
-
- def create(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountCreateResponse:
- """Creates a new service account in the project.
-
- This also returns an unredacted
- API key for the service account.
-
- Args:
- name: The name of the service account being created.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/service_accounts",
- body=maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountCreateResponse,
- )
-
- def retrieve(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccount:
- """
- Retrieves a service account in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccount,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountListResponse:
- """
- Returns a list of service accounts in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/service_accounts",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- service_account_list_params.ServiceAccountListParams,
- ),
- ),
- cast_to=ServiceAccountListResponse,
- )
-
- def delete(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountDeleteResponse:
- """
- Deletes a service account from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return self._delete(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountDeleteResponse,
- )
-
-
-class AsyncServiceAccountsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncServiceAccountsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncServiceAccountsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncServiceAccountsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncServiceAccountsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- project_id: str,
- *,
- name: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountCreateResponse:
- """Creates a new service account in the project.
-
- This also returns an unredacted
- API key for the service account.
-
- Args:
- name: The name of the service account being created.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/service_accounts",
- body=await async_maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountCreateResponse,
- )
-
- async def retrieve(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccount:
- """
- Retrieves a service account in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccount,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountListResponse:
- """
- Returns a list of service accounts in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/service_accounts",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- service_account_list_params.ServiceAccountListParams,
- ),
- ),
- cast_to=ServiceAccountListResponse,
- )
-
- async def delete(
- self,
- service_account_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ServiceAccountDeleteResponse:
- """
- Deletes a service account from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not service_account_id:
- raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}")
- return await self._delete(
- f"/organization/projects/{project_id}/service_accounts/{service_account_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ServiceAccountDeleteResponse,
- )
-
-
-class ServiceAccountsResourceWithRawResponse:
- def __init__(self, service_accounts: ServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = to_raw_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = to_raw_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = to_raw_response_wrapper(
- service_accounts.list,
- )
- self.delete = to_raw_response_wrapper(
- service_accounts.delete,
- )
-
-
-class AsyncServiceAccountsResourceWithRawResponse:
- def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = async_to_raw_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- service_accounts.list,
- )
- self.delete = async_to_raw_response_wrapper(
- service_accounts.delete,
- )
-
-
-class ServiceAccountsResourceWithStreamingResponse:
- def __init__(self, service_accounts: ServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = to_streamed_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- service_accounts.list,
- )
- self.delete = to_streamed_response_wrapper(
- service_accounts.delete,
- )
-
-
-class AsyncServiceAccountsResourceWithStreamingResponse:
- def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None:
- self._service_accounts = service_accounts
-
- self.create = async_to_streamed_response_wrapper(
- service_accounts.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- service_accounts.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- service_accounts.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- service_accounts.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/users.py b/src/digitalocean_genai_sdk/resources/organization/projects/users.py
deleted file mode 100644
index e35ff0cf..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/projects/users.py
+++ /dev/null
@@ -1,577 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.organization.projects import user_add_params, user_list_params, user_update_params
-from ....types.organization.projects.project_user import ProjectUser
-from ....types.organization.projects.user_list_response import UserListResponse
-from ....types.organization.projects.user_delete_response import UserDeleteResponse
-
-__all__ = ["UsersResource", "AsyncUsersResource"]
-
-
-class UsersResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UsersResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Retrieves a user in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- def update(
- self,
- user_id: str,
- *,
- project_id: str,
- role: Literal["owner", "member"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Modifies a user's role in the project.
-
- Args:
- role: `owner` or `member`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/users/{user_id}",
- body=maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Returns a list of users in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._get(
- f"/organization/projects/{project_id}/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- def delete(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._delete(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
- def add(
- self,
- project_id: str,
- *,
- role: Literal["owner", "member"],
- user_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """Adds a user to the project.
-
- Users must already be members of the organization to
- be added to a project.
-
- Args:
- role: `owner` or `member`
-
- user_id: The ID of the user.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return self._post(
- f"/organization/projects/{project_id}/users",
- body=maybe_transform(
- {
- "role": role,
- "user_id": user_id,
- },
- user_add_params.UserAddParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
-
-class AsyncUsersResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUsersResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Retrieves a user in the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- async def update(
- self,
- user_id: str,
- *,
- project_id: str,
- role: Literal["owner", "member"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """
- Modifies a user's role in the project.
-
- Args:
- role: `owner` or `member`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/users/{user_id}",
- body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
- async def list(
- self,
- project_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Returns a list of users in the project.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._get(
- f"/organization/projects/{project_id}/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- async def delete(
- self,
- user_id: str,
- *,
- project_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the project.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._delete(
- f"/organization/projects/{project_id}/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
- async def add(
- self,
- project_id: str,
- *,
- role: Literal["owner", "member"],
- user_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ProjectUser:
- """Adds a user to the project.
-
- Users must already be members of the organization to
- be added to a project.
-
- Args:
- role: `owner` or `member`
-
- user_id: The ID of the user.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- return await self._post(
- f"/organization/projects/{project_id}/users",
- body=await async_maybe_transform(
- {
- "role": role,
- "user_id": user_id,
- },
- user_add_params.UserAddParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ProjectUser,
- )
-
-
-class UsersResourceWithRawResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = to_raw_response_wrapper(
- users.update,
- )
- self.list = to_raw_response_wrapper(
- users.list,
- )
- self.delete = to_raw_response_wrapper(
- users.delete,
- )
- self.add = to_raw_response_wrapper(
- users.add,
- )
-
-
-class AsyncUsersResourceWithRawResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- users.update,
- )
- self.list = async_to_raw_response_wrapper(
- users.list,
- )
- self.delete = async_to_raw_response_wrapper(
- users.delete,
- )
- self.add = async_to_raw_response_wrapper(
- users.add,
- )
-
-
-class UsersResourceWithStreamingResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- users.update,
- )
- self.list = to_streamed_response_wrapper(
- users.list,
- )
- self.delete = to_streamed_response_wrapper(
- users.delete,
- )
- self.add = to_streamed_response_wrapper(
- users.add,
- )
-
-
-class AsyncUsersResourceWithStreamingResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- users.update,
- )
- self.list = async_to_streamed_response_wrapper(
- users.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- users.delete,
- )
- self.add = async_to_streamed_response_wrapper(
- users.add,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/usage.py b/src/digitalocean_genai_sdk/resources/organization/usage.py
deleted file mode 100644
index 37d11956..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/usage.py
+++ /dev/null
@@ -1,1543 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import (
- usage_images_params,
- usage_embeddings_params,
- usage_completions_params,
- usage_moderations_params,
- usage_vector_stores_params,
- usage_audio_speeches_params,
- usage_audio_transcriptions_params,
- usage_code_interpreter_sessions_params,
-)
-from ...types.usage_response import UsageResponse
-
-__all__ = ["UsageResource", "AsyncUsageResource"]
-
-
-class UsageResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UsageResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UsageResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UsageResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UsageResourceWithStreamingResponse(self)
-
- def audio_speeches(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio speeches usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/audio_speeches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_speeches_params.UsageAudioSpeechesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def audio_transcriptions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio transcriptions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/audio_transcriptions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_transcriptions_params.UsageAudioTranscriptionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def code_interpreter_sessions(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get code interpreter sessions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/code_interpreter_sessions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def completions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- batch: bool | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get completions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By
- default, return both.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of
- them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "batch": batch,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_completions_params.UsageCompletionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def embeddings(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get embeddings usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/embeddings",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_embeddings_params.UsageEmbeddingsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def images(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]]
- | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN,
- sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get images usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any
- combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- sizes: Return only usages for these image sizes. Possible values are `256x256`,
- `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them.
-
- sources: Return only usages for these sources. Possible values are `image.generation`,
- `image.edit`, `image.variation` or any combination of them.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/images",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "sizes": sizes,
- "sources": sources,
- "user_ids": user_ids,
- },
- usage_images_params.UsageImagesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def moderations(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get moderations usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/moderations",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_moderations_params.UsageModerationsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- def vector_stores(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get vector stores usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/usage/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_vector_stores_params.UsageVectorStoresParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
-
-class AsyncUsageResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUsageResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUsageResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUsageResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUsageResourceWithStreamingResponse(self)
-
- async def audio_speeches(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio speeches usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/audio_speeches",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_speeches_params.UsageAudioSpeechesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def audio_transcriptions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get audio transcriptions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/audio_transcriptions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_audio_transcriptions_params.UsageAudioTranscriptionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def code_interpreter_sessions(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get code interpreter sessions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/code_interpreter_sessions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def completions(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- batch: bool | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get completions usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By
- default, return both.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of
- them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/completions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "batch": batch,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_completions_params.UsageCompletionsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def embeddings(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get embeddings usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/embeddings",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_embeddings_params.UsageEmbeddingsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def images(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]]
- | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN,
- sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get images usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any
- combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- sizes: Return only usages for these image sizes. Possible values are `256x256`,
- `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them.
-
- sources: Return only usages for these sources. Possible values are `image.generation`,
- `image.edit`, `image.variation` or any combination of them.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/images",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "sizes": sizes,
- "sources": sources,
- "user_ids": user_ids,
- },
- usage_images_params.UsageImagesParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def moderations(
- self,
- *,
- start_time: int,
- api_key_ids: List[str] | NotGiven = NOT_GIVEN,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- models: List[str] | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- user_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get moderations usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- api_key_ids: Return only usage for these API keys.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- models: Return only usage for these models.
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- user_ids: Return only usage for these users.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/moderations",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "api_key_ids": api_key_ids,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "models": models,
- "page": page,
- "project_ids": project_ids,
- "user_ids": user_ids,
- },
- usage_moderations_params.UsageModerationsParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
- async def vector_stores(
- self,
- *,
- start_time: int,
- bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN,
- end_time: int | NotGiven = NOT_GIVEN,
- group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- page: str | NotGiven = NOT_GIVEN,
- project_ids: List[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UsageResponse:
- """
- Get vector stores usage details for the organization.
-
- Args:
- start_time: Start time (Unix seconds) of the query time range, inclusive.
-
- bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are
- supported, default to `1d`.
-
- end_time: End time (Unix seconds) of the query time range, exclusive.
-
- group_by: Group the usage data by the specified fields. Support fields include
- `project_id`.
-
- limit: Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
-
- page: A cursor for use in pagination. Corresponding to the `next_page` field from the
- previous response.
-
- project_ids: Return only usage for these projects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/usage/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "start_time": start_time,
- "bucket_width": bucket_width,
- "end_time": end_time,
- "group_by": group_by,
- "limit": limit,
- "page": page,
- "project_ids": project_ids,
- },
- usage_vector_stores_params.UsageVectorStoresParams,
- ),
- ),
- cast_to=UsageResponse,
- )
-
-
-class UsageResourceWithRawResponse:
- def __init__(self, usage: UsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = to_raw_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = to_raw_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = to_raw_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = to_raw_response_wrapper(
- usage.completions,
- )
- self.embeddings = to_raw_response_wrapper(
- usage.embeddings,
- )
- self.images = to_raw_response_wrapper(
- usage.images,
- )
- self.moderations = to_raw_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = to_raw_response_wrapper(
- usage.vector_stores,
- )
-
-
-class AsyncUsageResourceWithRawResponse:
- def __init__(self, usage: AsyncUsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = async_to_raw_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = async_to_raw_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = async_to_raw_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = async_to_raw_response_wrapper(
- usage.completions,
- )
- self.embeddings = async_to_raw_response_wrapper(
- usage.embeddings,
- )
- self.images = async_to_raw_response_wrapper(
- usage.images,
- )
- self.moderations = async_to_raw_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = async_to_raw_response_wrapper(
- usage.vector_stores,
- )
-
-
-class UsageResourceWithStreamingResponse:
- def __init__(self, usage: UsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = to_streamed_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = to_streamed_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = to_streamed_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = to_streamed_response_wrapper(
- usage.completions,
- )
- self.embeddings = to_streamed_response_wrapper(
- usage.embeddings,
- )
- self.images = to_streamed_response_wrapper(
- usage.images,
- )
- self.moderations = to_streamed_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = to_streamed_response_wrapper(
- usage.vector_stores,
- )
-
-
-class AsyncUsageResourceWithStreamingResponse:
- def __init__(self, usage: AsyncUsageResource) -> None:
- self._usage = usage
-
- self.audio_speeches = async_to_streamed_response_wrapper(
- usage.audio_speeches,
- )
- self.audio_transcriptions = async_to_streamed_response_wrapper(
- usage.audio_transcriptions,
- )
- self.code_interpreter_sessions = async_to_streamed_response_wrapper(
- usage.code_interpreter_sessions,
- )
- self.completions = async_to_streamed_response_wrapper(
- usage.completions,
- )
- self.embeddings = async_to_streamed_response_wrapper(
- usage.embeddings,
- )
- self.images = async_to_streamed_response_wrapper(
- usage.images,
- )
- self.moderations = async_to_streamed_response_wrapper(
- usage.moderations,
- )
- self.vector_stores = async_to_streamed_response_wrapper(
- usage.vector_stores,
- )
diff --git a/src/digitalocean_genai_sdk/resources/organization/users.py b/src/digitalocean_genai_sdk/resources/organization/users.py
deleted file mode 100644
index 536e4396..00000000
--- a/src/digitalocean_genai_sdk/resources/organization/users.py
+++ /dev/null
@@ -1,454 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.organization import user_list_params, user_update_params
-from ...types.organization.organization_user import OrganizationUser
-from ...types.organization.user_list_response import UserListResponse
-from ...types.organization.user_delete_response import UserDeleteResponse
-
-__all__ = ["UsersResource", "AsyncUsersResource"]
-
-
-class UsersResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UsersResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Retrieves a user by their identifier.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._get(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- def update(
- self,
- user_id: str,
- *,
- role: Literal["owner", "reader"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Modifies a user's role in the organization.
-
- Args:
- role: `owner` or `reader`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._post(
- f"/organization/users/{user_id}",
- body=maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- emails: List[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Lists all of the users in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- emails: Filter by the email address of users.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/organization/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "emails": emails,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- def delete(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return self._delete(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
-
-class AsyncUsersResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUsersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUsersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUsersResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Retrieves a user by their identifier.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._get(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- async def update(
- self,
- user_id: str,
- *,
- role: Literal["owner", "reader"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OrganizationUser:
- """
- Modifies a user's role in the organization.
-
- Args:
- role: `owner` or `reader`
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._post(
- f"/organization/users/{user_id}",
- body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OrganizationUser,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- emails: List[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserListResponse:
- """
- Lists all of the users in the organization.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- emails: Filter by the email address of users.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/organization/users",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "emails": emails,
- "limit": limit,
- },
- user_list_params.UserListParams,
- ),
- ),
- cast_to=UserListResponse,
- )
-
- async def delete(
- self,
- user_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UserDeleteResponse:
- """
- Deletes a user from the organization.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not user_id:
- raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}")
- return await self._delete(
- f"/organization/users/{user_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UserDeleteResponse,
- )
-
-
-class UsersResourceWithRawResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = to_raw_response_wrapper(
- users.update,
- )
- self.list = to_raw_response_wrapper(
- users.list,
- )
- self.delete = to_raw_response_wrapper(
- users.delete,
- )
-
-
-class AsyncUsersResourceWithRawResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_raw_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- users.update,
- )
- self.list = async_to_raw_response_wrapper(
- users.list,
- )
- self.delete = async_to_raw_response_wrapper(
- users.delete,
- )
-
-
-class UsersResourceWithStreamingResponse:
- def __init__(self, users: UsersResource) -> None:
- self._users = users
-
- self.retrieve = to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- users.update,
- )
- self.list = to_streamed_response_wrapper(
- users.list,
- )
- self.delete = to_streamed_response_wrapper(
- users.delete,
- )
-
-
-class AsyncUsersResourceWithStreamingResponse:
- def __init__(self, users: AsyncUsersResource) -> None:
- self._users = users
-
- self.retrieve = async_to_streamed_response_wrapper(
- users.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- users.update,
- )
- self.list = async_to_streamed_response_wrapper(
- users.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- users.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/realtime.py b/src/digitalocean_genai_sdk/resources/realtime.py
deleted file mode 100644
index 4c70a798..00000000
--- a/src/digitalocean_genai_sdk/resources/realtime.py
+++ /dev/null
@@ -1,574 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import realtime_create_session_params, realtime_create_transcription_session_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.voice_ids_shared_param import VoiceIDsSharedParam
-from ..types.realtime_create_session_response import RealtimeCreateSessionResponse
-from ..types.realtime_create_transcription_session_response import RealtimeCreateTranscriptionSessionResponse
-
-__all__ = ["RealtimeResource", "AsyncRealtimeResource"]
-
-
-class RealtimeResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> RealtimeResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return RealtimeResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RealtimeResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return RealtimeResourceWithStreamingResponse(self)
-
- def create_session(
- self,
- *,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
- instructions: str | NotGiven = NOT_GIVEN,
- max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- model: Literal[
- "gpt-4o-realtime-preview",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- ]
- | NotGiven = NOT_GIVEN,
- output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: str | NotGiven = NOT_GIVEN,
- tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API. Can be configured with the same session parameters as the
- `session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through
- [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
- and should be treated as guidance of input audio content rather than precisely
- what the model heard. The client can optionally set the language and prompt for
- transcription, these offer additional guidance to the transcription service.
-
- instructions: The default system instructions (i.e. system message) prepended to model calls.
- This field allows the client to guide the model on desired responses. The model
- can be instructed on response content and format, (e.g. "be extremely succinct",
- "act friendly", "here are examples of good responses") and on audio behavior
- (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
- instructions are not guaranteed to be followed by the model, but they provide
- guidance to the model on the desired behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
-
- max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- model: The Realtime model used for this session.
-
- output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
- For `pcm16`, output audio is sampled at a rate of 24kHz.
-
- temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
- temperature of 0.8 is highly recommended for best performance.
-
- tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
- a function.
-
- tools: Tools (functions) available to the model.
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- voice: The voice the model uses to respond. Voice cannot be changed during the session
- once the model has responded with audio at least once. Current voice options are
- `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- `shimmer`, and `verse`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/realtime/sessions",
- body=maybe_transform(
- {
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "instructions": instructions,
- "max_response_output_tokens": max_response_output_tokens,
- "modalities": modalities,
- "model": model,
- "output_audio_format": output_audio_format,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "turn_detection": turn_detection,
- "voice": voice,
- },
- realtime_create_session_params.RealtimeCreateSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateSessionResponse,
- )
-
- def create_transcription_session(
- self,
- *,
- include: List[str] | NotGiven = NOT_GIVEN,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction
- | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription
- | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateTranscriptionSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API specifically for realtime transcriptions. Can be configured with
- the same session parameters as the `transcription_session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- include:
- The set of items to include in the transcription. Current available items are:
-
- - `item.input_audio_transcription.logprobs`
-
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
- language and prompt for transcription, these offer additional guidance to the
- transcription service.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/realtime/transcription_sessions",
- body=maybe_transform(
- {
- "include": include,
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "modalities": modalities,
- "turn_detection": turn_detection,
- },
- realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateTranscriptionSessionResponse,
- )
-
-
-class AsyncRealtimeResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncRealtimeResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRealtimeResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRealtimeResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncRealtimeResourceWithStreamingResponse(self)
-
- async def create_session(
- self,
- *,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
- instructions: str | NotGiven = NOT_GIVEN,
- max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- model: Literal[
- "gpt-4o-realtime-preview",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- ]
- | NotGiven = NOT_GIVEN,
- output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: str | NotGiven = NOT_GIVEN,
- tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API. Can be configured with the same session parameters as the
- `session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through
- [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
- and should be treated as guidance of input audio content rather than precisely
- what the model heard. The client can optionally set the language and prompt for
- transcription, these offer additional guidance to the transcription service.
-
- instructions: The default system instructions (i.e. system message) prepended to model calls.
- This field allows the client to guide the model on desired responses. The model
- can be instructed on response content and format, (e.g. "be extremely succinct",
- "act friendly", "here are examples of good responses") and on audio behavior
- (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
- instructions are not guaranteed to be followed by the model, but they provide
- guidance to the model on the desired behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
-
- max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- model: The Realtime model used for this session.
-
- output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
- For `pcm16`, output audio is sampled at a rate of 24kHz.
-
- temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
- temperature of 0.8 is highly recommended for best performance.
-
- tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
- a function.
-
- tools: Tools (functions) available to the model.
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- voice: The voice the model uses to respond. Voice cannot be changed during the session
- once the model has responded with audio at least once. Current voice options are
- `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
- `shimmer`, and `verse`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/realtime/sessions",
- body=await async_maybe_transform(
- {
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "instructions": instructions,
- "max_response_output_tokens": max_response_output_tokens,
- "modalities": modalities,
- "model": model,
- "output_audio_format": output_audio_format,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "turn_detection": turn_detection,
- "voice": voice,
- },
- realtime_create_session_params.RealtimeCreateSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateSessionResponse,
- )
-
- async def create_transcription_session(
- self,
- *,
- include: List[str] | NotGiven = NOT_GIVEN,
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
- input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction
- | NotGiven = NOT_GIVEN,
- input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription
- | NotGiven = NOT_GIVEN,
- modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
- turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RealtimeCreateTranscriptionSessionResponse:
- """
- Create an ephemeral API token for use in client-side applications with the
- Realtime API specifically for realtime transcriptions. Can be configured with
- the same session parameters as the `transcription_session.update` client event.
-
- It responds with a session object, plus a `client_secret` key which contains a
- usable ephemeral API token that can be used to authenticate browser clients for
- the Realtime API.
-
- Args:
- include:
- The set of items to include in the transcription. Current available items are:
-
- - `item.input_audio_transcription.logprobs`
-
- input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
- `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
- (mono), and little-endian byte order.
-
- input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
- off. Noise reduction filters audio added to the input audio buffer before it is
- sent to VAD and the model. Filtering the audio can improve VAD and turn
- detection accuracy (reducing false positives) and model performance by improving
- perception of the input audio.
-
- input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
- language and prompt for transcription, these offer additional guidance to the
- transcription service.
-
- modalities: The set of modalities the model can respond with. To disable audio, set this to
- ["text"].
-
- turn_detection: Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
- set to `null` to turn off, in which case the client must manually trigger model
- response. Server VAD means that the model will detect the start and end of
- speech based on audio volume and respond at the end of user speech. Semantic VAD
- is more advanced and uses a turn detection model (in conjuction with VAD) to
- semantically estimate whether the user has finished speaking, then dynamically
- sets a timeout based on this probability. For example, if user audio trails off
- with "uhhm", the model will score a low probability of turn end and wait longer
- for the user to continue speaking. This can be useful for more natural
- conversations, but may have a higher latency.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/realtime/transcription_sessions",
- body=await async_maybe_transform(
- {
- "include": include,
- "input_audio_format": input_audio_format,
- "input_audio_noise_reduction": input_audio_noise_reduction,
- "input_audio_transcription": input_audio_transcription,
- "modalities": modalities,
- "turn_detection": turn_detection,
- },
- realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RealtimeCreateTranscriptionSessionResponse,
- )
-
-
-class RealtimeResourceWithRawResponse:
- def __init__(self, realtime: RealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = to_raw_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = to_raw_response_wrapper(
- realtime.create_transcription_session,
- )
-
-
-class AsyncRealtimeResourceWithRawResponse:
- def __init__(self, realtime: AsyncRealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = async_to_raw_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = async_to_raw_response_wrapper(
- realtime.create_transcription_session,
- )
-
-
-class RealtimeResourceWithStreamingResponse:
- def __init__(self, realtime: RealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = to_streamed_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = to_streamed_response_wrapper(
- realtime.create_transcription_session,
- )
-
-
-class AsyncRealtimeResourceWithStreamingResponse:
- def __init__(self, realtime: AsyncRealtimeResource) -> None:
- self._realtime = realtime
-
- self.create_session = async_to_streamed_response_wrapper(
- realtime.create_session,
- )
- self.create_transcription_session = async_to_streamed_response_wrapper(
- realtime.create_transcription_session,
- )
diff --git a/src/digitalocean_genai_sdk/resources/responses.py b/src/digitalocean_genai_sdk/resources/responses.py
deleted file mode 100644
index 03445cdc..00000000
--- a/src/digitalocean_genai_sdk/resources/responses.py
+++ /dev/null
@@ -1,902 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import response_create_params, response_retrieve_params, response_list_input_items_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.response import Response
-from ..types.includable import Includable
-from ..types.response_list_input_items_response import ResponseListInputItemsResponse
-
-__all__ = ["ResponsesResource", "AsyncResponsesResource"]
-
-
-class ResponsesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ResponsesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ResponsesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ResponsesResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- input: Union[str, Iterable[response_create_params.InputInputItemList]],
- model: Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- ],
- include: Optional[List[Includable]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
- tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """Creates a model response.
-
- Provide [text](/docs/guides/text) or
- [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or
- [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own
- [custom code](/docs/guides/function-calling) or use built-in
- [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search) to use your own data as input for
- the model's response.
-
- Args:
- input: Text, image, or file inputs to the model, used to generate a response.
-
- Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Image inputs](/docs/guides/images)
- - [File inputs](/docs/guides/pdf-files)
- - [Conversation state](/docs/guides/conversation-state)
- - [Function calling](/docs/guides/function-calling)
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
-
- include: Specify additional output data to include in the model response. Currently
- supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- instructions: Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
-
- max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
-
- previous_response_id: The unique ID of the previous response to the model. Use this to create
- multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
-
- reasoning: **o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-
- store: Whether to store the generated model response for later retrieval via API.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/responses-streaming) for
- more information.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic. We generally recommend altering this or `top_p` but
- not both.
-
- text: Configuration options for a text response from the model. Can be plain text or
- structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
-
- tool_choice: How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
-
- tools: An array of tools the model may call while generating a response. You can
- specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- truncation: The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/responses",
- body=maybe_transform(
- {
- "input": input,
- "model": model,
- "include": include,
- "instructions": instructions,
- "max_output_tokens": max_output_tokens,
- "metadata": metadata,
- "parallel_tool_calls": parallel_tool_calls,
- "previous_response_id": previous_response_id,
- "reasoning": reasoning,
- "store": store,
- "stream": stream,
- "temperature": temperature,
- "text": text,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation": truncation,
- "user": user,
- },
- response_create_params.ResponseCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Response,
- )
-
- def retrieve(
- self,
- response_id: str,
- *,
- include: List[Includable] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """
- Retrieves a model response with the given ID.
-
- Args:
- include: Specify additional output data to include in the response. Currently supported
- values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return self._get(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams),
- ),
- cast_to=Response,
- )
-
- def delete(
- self,
- response_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
- """
- Deletes a model response with the given ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- def list_input_items(
- self,
- response_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResponseListInputItemsResponse:
- """
- Returns a list of input items for a given response.
-
- Args:
- after: An item ID to list items after, used in pagination.
-
- before: An item ID to list items before, used in pagination.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: The order to return the input items in. Default is `asc`.
-
- - `asc`: Return the input items in ascending order.
- - `desc`: Return the input items in descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return self._get(
- f"/responses/{response_id}/input_items",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- response_list_input_items_params.ResponseListInputItemsParams,
- ),
- ),
- cast_to=ResponseListInputItemsResponse,
- )
-
-
-class AsyncResponsesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncResponsesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncResponsesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- input: Union[str, Iterable[response_create_params.InputInputItemList]],
- model: Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- ],
- include: Optional[List[Includable]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
- reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN,
- store: Optional[bool] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
- tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """Creates a model response.
-
- Provide [text](/docs/guides/text) or
- [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or
- [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own
- [custom code](/docs/guides/function-calling) or use built-in
- [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search) to use your own data as input for
- the model's response.
-
- Args:
- input: Text, image, or file inputs to the model, used to generate a response.
-
- Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Image inputs](/docs/guides/images)
- - [File inputs](/docs/guides/pdf-files)
- - [Conversation state](/docs/guides/conversation-state)
- - [Function calling](/docs/guides/function-calling)
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
-
- include: Specify additional output data to include in the model response. Currently
- supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- instructions: Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
-
- max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
-
- previous_response_id: The unique ID of the previous response to the model. Use this to create
- multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
-
- reasoning: **o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-
- store: Whether to store the generated model response for later retrieval via API.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/responses-streaming) for
- more information.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic. We generally recommend altering this or `top_p` but
- not both.
-
- text: Configuration options for a text response from the model. Can be plain text or
- structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
-
- tool_choice: How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
-
- tools: An array of tools the model may call while generating a response. You can
- specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- truncation: The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/responses",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- "include": include,
- "instructions": instructions,
- "max_output_tokens": max_output_tokens,
- "metadata": metadata,
- "parallel_tool_calls": parallel_tool_calls,
- "previous_response_id": previous_response_id,
- "reasoning": reasoning,
- "store": store,
- "stream": stream,
- "temperature": temperature,
- "text": text,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation": truncation,
- "user": user,
- },
- response_create_params.ResponseCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Response,
- )
-
- async def retrieve(
- self,
- response_id: str,
- *,
- include: List[Includable] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """
- Retrieves a model response with the given ID.
-
- Args:
- include: Specify additional output data to include in the response. Currently supported
- values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return await self._get(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {"include": include}, response_retrieve_params.ResponseRetrieveParams
- ),
- ),
- cast_to=Response,
- )
-
- async def delete(
- self,
- response_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
- """
- Deletes a model response with the given ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- f"/responses/{response_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- async def list_input_items(
- self,
- response_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResponseListInputItemsResponse:
- """
- Returns a list of input items for a given response.
-
- Args:
- after: An item ID to list items after, used in pagination.
-
- before: An item ID to list items before, used in pagination.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: The order to return the input items in. Default is `asc`.
-
- - `asc`: Return the input items in ascending order.
- - `desc`: Return the input items in descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not response_id:
- raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
- return await self._get(
- f"/responses/{response_id}/input_items",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- response_list_input_items_params.ResponseListInputItemsParams,
- ),
- ),
- cast_to=ResponseListInputItemsResponse,
- )
-
-
-class ResponsesResourceWithRawResponse:
- def __init__(self, responses: ResponsesResource) -> None:
- self._responses = responses
-
- self.create = to_raw_response_wrapper(
- responses.create,
- )
- self.retrieve = to_raw_response_wrapper(
- responses.retrieve,
- )
- self.delete = to_raw_response_wrapper(
- responses.delete,
- )
- self.list_input_items = to_raw_response_wrapper(
- responses.list_input_items,
- )
-
-
-class AsyncResponsesResourceWithRawResponse:
- def __init__(self, responses: AsyncResponsesResource) -> None:
- self._responses = responses
-
- self.create = async_to_raw_response_wrapper(
- responses.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- responses.retrieve,
- )
- self.delete = async_to_raw_response_wrapper(
- responses.delete,
- )
- self.list_input_items = async_to_raw_response_wrapper(
- responses.list_input_items,
- )
-
-
-class ResponsesResourceWithStreamingResponse:
- def __init__(self, responses: ResponsesResource) -> None:
- self._responses = responses
-
- self.create = to_streamed_response_wrapper(
- responses.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- responses.retrieve,
- )
- self.delete = to_streamed_response_wrapper(
- responses.delete,
- )
- self.list_input_items = to_streamed_response_wrapper(
- responses.list_input_items,
- )
-
-
-class AsyncResponsesResourceWithStreamingResponse:
- def __init__(self, responses: AsyncResponsesResource) -> None:
- self._responses = responses
-
- self.create = async_to_streamed_response_wrapper(
- responses.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- responses.retrieve,
- )
- self.delete = async_to_streamed_response_wrapper(
- responses.delete,
- )
- self.list_input_items = async_to_streamed_response_wrapper(
- responses.list_input_items,
- )
diff --git a/src/digitalocean_genai_sdk/resources/threads/__init__.py b/src/digitalocean_genai_sdk/resources/threads/__init__.py
deleted file mode 100644
index 736b9bd6..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from .threads import (
- ThreadsResource,
- AsyncThreadsResource,
- ThreadsResourceWithRawResponse,
- AsyncThreadsResourceWithRawResponse,
- ThreadsResourceWithStreamingResponse,
- AsyncThreadsResourceWithStreamingResponse,
-)
-from .messages import (
- MessagesResource,
- AsyncMessagesResource,
- MessagesResourceWithRawResponse,
- AsyncMessagesResourceWithRawResponse,
- MessagesResourceWithStreamingResponse,
- AsyncMessagesResourceWithStreamingResponse,
-)
-
-__all__ = [
- "RunsResource",
- "AsyncRunsResource",
- "RunsResourceWithRawResponse",
- "AsyncRunsResourceWithRawResponse",
- "RunsResourceWithStreamingResponse",
- "AsyncRunsResourceWithStreamingResponse",
- "MessagesResource",
- "AsyncMessagesResource",
- "MessagesResourceWithRawResponse",
- "AsyncMessagesResourceWithRawResponse",
- "MessagesResourceWithStreamingResponse",
- "AsyncMessagesResourceWithStreamingResponse",
- "ThreadsResource",
- "AsyncThreadsResource",
- "ThreadsResourceWithRawResponse",
- "AsyncThreadsResourceWithRawResponse",
- "ThreadsResourceWithStreamingResponse",
- "AsyncThreadsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/threads/messages.py b/src/digitalocean_genai_sdk/resources/threads/messages.py
deleted file mode 100644
index e62eb94c..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/messages.py
+++ /dev/null
@@ -1,654 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.threads import message_list_params, message_create_params, message_update_params
-from ...types.threads.message_object import MessageObject
-from ...types.threads.message_list_response import MessageListResponse
-from ...types.threads.message_delete_response import MessageDeleteResponse
-
-__all__ = ["MessagesResource", "AsyncMessagesResource"]
-
-
-class MessagesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> MessagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return MessagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> MessagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return MessagesResourceWithStreamingResponse(self)
-
- def create(
- self,
- thread_id: str,
- *,
- content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]],
- role: Literal["user", "assistant"],
- attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Create a message.
-
- Args:
- content: The text contents of the message.
-
- role:
- The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
-
- attachments: A list of files attached to the message, and the tools they should be added to.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._post(
- f"/threads/{thread_id}/messages",
- body=maybe_transform(
- {
- "content": content,
- "role": role,
- "attachments": attachments,
- "metadata": metadata,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- def retrieve(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Retrieve a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return self._get(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- def update(
- self,
- message_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Modifies a message.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return self._post(
- f"/threads/{thread_id}/messages/{message_id}",
- body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- run_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageListResponse:
- """
- Returns a list of messages for a given thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- run_id: Filter messages by the run ID that generated them.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._get(
- f"/threads/{thread_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- "run_id": run_id,
- },
- message_list_params.MessageListParams,
- ),
- ),
- cast_to=MessageListResponse,
- )
-
- def delete(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageDeleteResponse:
- """
- Deletes a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return self._delete(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageDeleteResponse,
- )
-
-
-class AsyncMessagesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncMessagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncMessagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncMessagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncMessagesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- thread_id: str,
- *,
- content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]],
- role: Literal["user", "assistant"],
- attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Create a message.
-
- Args:
- content: The text contents of the message.
-
- role:
- The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
-
- attachments: A list of files attached to the message, and the tools they should be added to.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._post(
- f"/threads/{thread_id}/messages",
- body=await async_maybe_transform(
- {
- "content": content,
- "role": role,
- "attachments": attachments,
- "metadata": metadata,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- async def retrieve(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Retrieve a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return await self._get(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- async def update(
- self,
- message_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageObject:
- """
- Modifies a message.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return await self._post(
- f"/threads/{thread_id}/messages/{message_id}",
- body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageObject,
- )
-
- async def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- run_id: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageListResponse:
- """
- Returns a list of messages for a given thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- run_id: Filter messages by the run ID that generated them.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._get(
- f"/threads/{thread_id}/messages",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- "run_id": run_id,
- },
- message_list_params.MessageListParams,
- ),
- ),
- cast_to=MessageListResponse,
- )
-
- async def delete(
- self,
- message_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MessageDeleteResponse:
- """
- Deletes a message.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not message_id:
- raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
- return await self._delete(
- f"/threads/{thread_id}/messages/{message_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MessageDeleteResponse,
- )
-
-
-class MessagesResourceWithRawResponse:
- def __init__(self, messages: MessagesResource) -> None:
- self._messages = messages
-
- self.create = to_raw_response_wrapper(
- messages.create,
- )
- self.retrieve = to_raw_response_wrapper(
- messages.retrieve,
- )
- self.update = to_raw_response_wrapper(
- messages.update,
- )
- self.list = to_raw_response_wrapper(
- messages.list,
- )
- self.delete = to_raw_response_wrapper(
- messages.delete,
- )
-
-
-class AsyncMessagesResourceWithRawResponse:
- def __init__(self, messages: AsyncMessagesResource) -> None:
- self._messages = messages
-
- self.create = async_to_raw_response_wrapper(
- messages.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- messages.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- messages.update,
- )
- self.list = async_to_raw_response_wrapper(
- messages.list,
- )
- self.delete = async_to_raw_response_wrapper(
- messages.delete,
- )
-
-
-class MessagesResourceWithStreamingResponse:
- def __init__(self, messages: MessagesResource) -> None:
- self._messages = messages
-
- self.create = to_streamed_response_wrapper(
- messages.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- messages.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- messages.update,
- )
- self.list = to_streamed_response_wrapper(
- messages.list,
- )
- self.delete = to_streamed_response_wrapper(
- messages.delete,
- )
-
-
-class AsyncMessagesResourceWithStreamingResponse:
- def __init__(self, messages: AsyncMessagesResource) -> None:
- self._messages = messages
-
- self.create = async_to_streamed_response_wrapper(
- messages.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- messages.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- messages.update,
- )
- self.list = async_to_streamed_response_wrapper(
- messages.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- messages.delete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py b/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py
deleted file mode 100644
index 70942400..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from .steps import (
- StepsResource,
- AsyncStepsResource,
- StepsResourceWithRawResponse,
- AsyncStepsResourceWithRawResponse,
- StepsResourceWithStreamingResponse,
- AsyncStepsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "StepsResource",
- "AsyncStepsResource",
- "StepsResourceWithRawResponse",
- "AsyncStepsResourceWithRawResponse",
- "StepsResourceWithStreamingResponse",
- "AsyncStepsResourceWithStreamingResponse",
- "RunsResource",
- "AsyncRunsResource",
- "RunsResourceWithRawResponse",
- "AsyncRunsResourceWithRawResponse",
- "RunsResourceWithStreamingResponse",
- "AsyncRunsResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py b/src/digitalocean_genai_sdk/resources/threads/runs/runs.py
deleted file mode 100644
index a270b7a9..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py
+++ /dev/null
@@ -1,1427 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from .steps import (
- StepsResource,
- AsyncStepsResource,
- StepsResourceWithRawResponse,
- AsyncStepsResourceWithRawResponse,
- StepsResourceWithStreamingResponse,
- AsyncStepsResourceWithStreamingResponse,
-)
-from ....types import ReasoningEffort
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.threads import (
- run_list_params,
- run_create_params,
- run_update_params,
- run_create_run_params,
- run_submit_tool_outputs_params,
-)
-from ....types.reasoning_effort import ReasoningEffort
-from ....types.threads.run_object import RunObject
-from ....types.threads.run_list_response import RunListResponse
-from ....types.assistant_supported_models import AssistantSupportedModels
-from ....types.create_thread_request_param import CreateThreadRequestParam
-from ....types.threads.truncation_object_param import TruncationObjectParam
-from ....types.threads.create_message_request_param import CreateMessageRequestParam
-from ....types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-from ....types.threads.assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam
-
-__all__ = ["RunsResource", "AsyncRunsResource"]
-
-
-class RunsResource(SyncAPIResource):
- @cached_property
- def steps(self) -> StepsResource:
- return StepsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> RunsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return RunsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RunsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return RunsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- assistant_id: str,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[
- str,
- Literal[
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
- None,
- ]
- | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a thread and run it in one request.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- instructions: Override the default system message of the assistant. This is useful for
- modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- thread: Options to create a new thread. If no thread is provided when running a request,
- an empty thread will be created.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/threads/runs",
- body=maybe_transform(
- {
- "assistant_id": assistant_id,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "thread": thread,
- "tool_choice": tool_choice,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_params.RunCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def retrieve(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Retrieves a run.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs/{run_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def update(
- self,
- run_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Modifies a run.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs/{run_id}",
- body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunListResponse:
- """
- Returns a list of runs belonging to a thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- run_list_params.RunListParams,
- ),
- ),
- cast_to=RunListResponse,
- )
-
- def cancel(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Cancels a run that is `in_progress`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs/{run_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def create_run(
- self,
- thread_id: str,
- *,
- assistant_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
- additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_run_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a run.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- additional_instructions: Appends additional instructions at the end of the instructions for the run. This
- is useful for modifying the behavior on a per-run basis without overriding other
- instructions.
-
- additional_messages: Adds additional messages to the thread before creating the run.
-
- instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of
- the assistant. This is useful for modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs",
- body=maybe_transform(
- {
- "assistant_id": assistant_id,
- "additional_instructions": additional_instructions,
- "additional_messages": additional_messages,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_run_params.RunCreateRunParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams),
- ),
- cast_to=RunObject,
- )
-
- def submit_tool_outputs(
- self,
- run_id: str,
- *,
- thread_id: str,
- tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- When a run has the `status: "requires_action"` and `required_action.type` is
- `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
- tool calls once they're all completed. All outputs must be submitted in a single
- request.
-
- Args:
- tool_outputs: A list of tools for which the outputs are being submitted.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._post(
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
- body=maybe_transform(
- {
- "tool_outputs": tool_outputs,
- "stream": stream,
- },
- run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
-
-class AsyncRunsResource(AsyncAPIResource):
- @cached_property
- def steps(self) -> AsyncStepsResource:
- return AsyncStepsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncRunsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRunsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncRunsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- assistant_id: str,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[
- str,
- Literal[
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
- None,
- ]
- | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a thread and run it in one request.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- instructions: Override the default system message of the assistant. This is useful for
- modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- thread: Options to create a new thread. If no thread is provided when running a request,
- an empty thread will be created.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tool_resources: A set of resources that are used by the assistant's tools. The resources are
- specific to the type of tool. For example, the `code_interpreter` tool requires
- a list of file IDs, while the `file_search` tool requires a list of vector store
- IDs.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/threads/runs",
- body=await async_maybe_transform(
- {
- "assistant_id": assistant_id,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "thread": thread,
- "tool_choice": tool_choice,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_params.RunCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def retrieve(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Retrieves a run.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs/{run_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def update(
- self,
- run_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Modifies a run.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs/{run_id}",
- body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def list(
- self,
- thread_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunListResponse:
- """
- Returns a list of runs belonging to a thread.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- run_list_params.RunListParams,
- ),
- ),
- cast_to=RunListResponse,
- )
-
- async def cancel(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Cancels a run that is `in_progress`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs/{run_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- async def create_run(
- self,
- thread_id: str,
- *,
- assistant_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
- additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN,
- instructions: Optional[str] | NotGiven = NOT_GIVEN,
- max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN,
- parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
- reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_run_params.Tool]] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Create a run.
-
- Args:
- assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- additional_instructions: Appends additional instructions at the end of the instructions for the run. This
- is useful for modifying the behavior on a per-run basis without overriding other
- instructions.
-
- additional_messages: Adds additional messages to the thread before creating the run.
-
- instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of
- the assistant. This is useful for modifying the behavior on a per-run basis.
-
- max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- model: The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run. If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
-
- parallel_tool_calls: Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
-
- reasoning_effort: **o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
-
- response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- tools: Override the tools the assistant can use for this run. This is useful for
- modifying the behavior on a per-run basis.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the intial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs",
- body=await async_maybe_transform(
- {
- "assistant_id": assistant_id,
- "additional_instructions": additional_instructions,
- "additional_messages": additional_messages,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "reasoning_effort": reasoning_effort,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_run_params.RunCreateRunParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams),
- ),
- cast_to=RunObject,
- )
-
- async def submit_tool_outputs(
- self,
- run_id: str,
- *,
- thread_id: str,
- tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
- stream: Optional[bool] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- When a run has the `status: "requires_action"` and `required_action.type` is
- `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
- tool calls once they're all completed. All outputs must be submitted in a single
- request.
-
- Args:
- tool_outputs: A list of tools for which the outputs are being submitted.
-
- stream: If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._post(
- f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
- body=await async_maybe_transform(
- {
- "tool_outputs": tool_outputs,
- "stream": stream,
- },
- run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
-
-class RunsResourceWithRawResponse:
- def __init__(self, runs: RunsResource) -> None:
- self._runs = runs
-
- self.create = to_raw_response_wrapper(
- runs.create,
- )
- self.retrieve = to_raw_response_wrapper(
- runs.retrieve,
- )
- self.update = to_raw_response_wrapper(
- runs.update,
- )
- self.list = to_raw_response_wrapper(
- runs.list,
- )
- self.cancel = to_raw_response_wrapper(
- runs.cancel,
- )
- self.create_run = to_raw_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = to_raw_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> StepsResourceWithRawResponse:
- return StepsResourceWithRawResponse(self._runs.steps)
-
-
-class AsyncRunsResourceWithRawResponse:
- def __init__(self, runs: AsyncRunsResource) -> None:
- self._runs = runs
-
- self.create = async_to_raw_response_wrapper(
- runs.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- runs.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- runs.update,
- )
- self.list = async_to_raw_response_wrapper(
- runs.list,
- )
- self.cancel = async_to_raw_response_wrapper(
- runs.cancel,
- )
- self.create_run = async_to_raw_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = async_to_raw_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> AsyncStepsResourceWithRawResponse:
- return AsyncStepsResourceWithRawResponse(self._runs.steps)
-
-
-class RunsResourceWithStreamingResponse:
- def __init__(self, runs: RunsResource) -> None:
- self._runs = runs
-
- self.create = to_streamed_response_wrapper(
- runs.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- runs.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- runs.update,
- )
- self.list = to_streamed_response_wrapper(
- runs.list,
- )
- self.cancel = to_streamed_response_wrapper(
- runs.cancel,
- )
- self.create_run = to_streamed_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = to_streamed_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> StepsResourceWithStreamingResponse:
- return StepsResourceWithStreamingResponse(self._runs.steps)
-
-
-class AsyncRunsResourceWithStreamingResponse:
- def __init__(self, runs: AsyncRunsResource) -> None:
- self._runs = runs
-
- self.create = async_to_streamed_response_wrapper(
- runs.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- runs.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- runs.update,
- )
- self.list = async_to_streamed_response_wrapper(
- runs.list,
- )
- self.cancel = async_to_streamed_response_wrapper(
- runs.cancel,
- )
- self.create_run = async_to_streamed_response_wrapper(
- runs.create_run,
- )
- self.submit_tool_outputs = async_to_streamed_response_wrapper(
- runs.submit_tool_outputs,
- )
-
- @cached_property
- def steps(self) -> AsyncStepsResourceWithStreamingResponse:
- return AsyncStepsResourceWithStreamingResponse(self._runs.steps)
diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py b/src/digitalocean_genai_sdk/resources/threads/runs/steps.py
deleted file mode 100644
index 2b5ffd09..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.threads.runs import step_list_params, step_retrieve_params
-from ....types.threads.runs.run_step_object import RunStepObject
-from ....types.threads.runs.step_list_response import StepListResponse
-
-__all__ = ["StepsResource", "AsyncStepsResource"]
-
-
-class StepsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> StepsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return StepsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> StepsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return StepsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- step_id: str,
- *,
- thread_id: str,
- run_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunStepObject:
- """
- Retrieves a run step.
-
- Args:
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- if not step_id:
- raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
- ),
- cast_to=RunStepObject,
- )
-
- def list(
- self,
- run_id: str,
- *,
- thread_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> StepListResponse:
- """
- Returns a list of run steps belonging to a run.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "include": include,
- "limit": limit,
- "order": order,
- },
- step_list_params.StepListParams,
- ),
- ),
- cast_to=StepListResponse,
- )
-
-
-class AsyncStepsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncStepsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncStepsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncStepsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncStepsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- step_id: str,
- *,
- thread_id: str,
- run_id: str,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunStepObject:
- """
- Retrieves a run step.
-
- Args:
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- if not step_id:
- raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
- ),
- cast_to=RunStepObject,
- )
-
- async def list(
- self,
- run_id: str,
- *,
- thread_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> StepListResponse:
- """
- Returns a list of run steps belonging to a run.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- include: A list of additional fields to include in the response. Currently the only
- supported value is `step_details.tool_calls[*].file_search.results[*].content`
- to fetch the file search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return await self._get(
- f"/threads/{thread_id}/runs/{run_id}/steps",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "include": include,
- "limit": limit,
- "order": order,
- },
- step_list_params.StepListParams,
- ),
- ),
- cast_to=StepListResponse,
- )
-
-
-class StepsResourceWithRawResponse:
- def __init__(self, steps: StepsResource) -> None:
- self._steps = steps
-
- self.retrieve = to_raw_response_wrapper(
- steps.retrieve,
- )
- self.list = to_raw_response_wrapper(
- steps.list,
- )
-
-
-class AsyncStepsResourceWithRawResponse:
- def __init__(self, steps: AsyncStepsResource) -> None:
- self._steps = steps
-
- self.retrieve = async_to_raw_response_wrapper(
- steps.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- steps.list,
- )
-
-
-class StepsResourceWithStreamingResponse:
- def __init__(self, steps: StepsResource) -> None:
- self._steps = steps
-
- self.retrieve = to_streamed_response_wrapper(
- steps.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- steps.list,
- )
-
-
-class AsyncStepsResourceWithStreamingResponse:
- def __init__(self, steps: AsyncStepsResource) -> None:
- self._steps = steps
-
- self.retrieve = async_to_streamed_response_wrapper(
- steps.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- steps.list,
- )
diff --git a/src/digitalocean_genai_sdk/resources/threads/threads.py b/src/digitalocean_genai_sdk/resources/threads/threads.py
deleted file mode 100644
index 64062ffb..00000000
--- a/src/digitalocean_genai_sdk/resources/threads/threads.py
+++ /dev/null
@@ -1,553 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable, Optional
-
-import httpx
-
-from ...types import thread_create_params, thread_update_params
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from .messages import (
- MessagesResource,
- AsyncMessagesResource,
- MessagesResourceWithRawResponse,
- AsyncMessagesResourceWithRawResponse,
- MessagesResourceWithStreamingResponse,
- AsyncMessagesResourceWithStreamingResponse,
-)
-from ..._compat import cached_property
-from .runs.runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.thread_object import ThreadObject
-from ...types.thread_delete_response import ThreadDeleteResponse
-from ...types.threads.create_message_request_param import CreateMessageRequestParam
-
-__all__ = ["ThreadsResource", "AsyncThreadsResource"]
-
-
-class ThreadsResource(SyncAPIResource):
- @cached_property
- def runs(self) -> RunsResource:
- return RunsResource(self._client)
-
- @cached_property
- def messages(self) -> MessagesResource:
- return MessagesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ThreadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return ThreadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ThreadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return ThreadsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Create a thread.
-
- Args:
- messages: A list of [messages](/docs/api-reference/messages) to start the thread with.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/threads",
- body=maybe_transform(
- {
- "messages": messages,
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_create_params.ThreadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- def retrieve(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Retrieves a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._get(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- def update(
- self,
- thread_id: str,
- *,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Modifies a thread.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._post(
- f"/threads/{thread_id}",
- body=maybe_transform(
- {
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_update_params.ThreadUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- def delete(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadDeleteResponse:
- """
- Delete a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return self._delete(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadDeleteResponse,
- )
-
-
-class AsyncThreadsResource(AsyncAPIResource):
- @cached_property
- def runs(self) -> AsyncRunsResource:
- return AsyncRunsResource(self._client)
-
- @cached_property
- def messages(self) -> AsyncMessagesResource:
- return AsyncMessagesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncThreadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncThreadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncThreadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncThreadsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Create a thread.
-
- Args:
- messages: A list of [messages](/docs/api-reference/messages) to start the thread with.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/threads",
- body=await async_maybe_transform(
- {
- "messages": messages,
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_create_params.ThreadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- async def retrieve(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Retrieves a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._get(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- async def update(
- self,
- thread_id: str,
- *,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadObject:
- """
- Modifies a thread.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- tool_resources: A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._post(
- f"/threads/{thread_id}",
- body=await async_maybe_transform(
- {
- "metadata": metadata,
- "tool_resources": tool_resources,
- },
- thread_update_params.ThreadUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadObject,
- )
-
- async def delete(
- self,
- thread_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ThreadDeleteResponse:
- """
- Delete a thread.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- return await self._delete(
- f"/threads/{thread_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ThreadDeleteResponse,
- )
-
-
-class ThreadsResourceWithRawResponse:
- def __init__(self, threads: ThreadsResource) -> None:
- self._threads = threads
-
- self.create = to_raw_response_wrapper(
- threads.create,
- )
- self.retrieve = to_raw_response_wrapper(
- threads.retrieve,
- )
- self.update = to_raw_response_wrapper(
- threads.update,
- )
- self.delete = to_raw_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> RunsResourceWithRawResponse:
- return RunsResourceWithRawResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> MessagesResourceWithRawResponse:
- return MessagesResourceWithRawResponse(self._threads.messages)
-
-
-class AsyncThreadsResourceWithRawResponse:
- def __init__(self, threads: AsyncThreadsResource) -> None:
- self._threads = threads
-
- self.create = async_to_raw_response_wrapper(
- threads.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- threads.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- threads.update,
- )
- self.delete = async_to_raw_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> AsyncRunsResourceWithRawResponse:
- return AsyncRunsResourceWithRawResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> AsyncMessagesResourceWithRawResponse:
- return AsyncMessagesResourceWithRawResponse(self._threads.messages)
-
-
-class ThreadsResourceWithStreamingResponse:
- def __init__(self, threads: ThreadsResource) -> None:
- self._threads = threads
-
- self.create = to_streamed_response_wrapper(
- threads.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- threads.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- threads.update,
- )
- self.delete = to_streamed_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> RunsResourceWithStreamingResponse:
- return RunsResourceWithStreamingResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> MessagesResourceWithStreamingResponse:
- return MessagesResourceWithStreamingResponse(self._threads.messages)
-
-
-class AsyncThreadsResourceWithStreamingResponse:
- def __init__(self, threads: AsyncThreadsResource) -> None:
- self._threads = threads
-
- self.create = async_to_streamed_response_wrapper(
- threads.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- threads.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- threads.update,
- )
- self.delete = async_to_streamed_response_wrapper(
- threads.delete,
- )
-
- @cached_property
- def runs(self) -> AsyncRunsResourceWithStreamingResponse:
- return AsyncRunsResourceWithStreamingResponse(self._threads.runs)
-
- @cached_property
- def messages(self) -> AsyncMessagesResourceWithStreamingResponse:
- return AsyncMessagesResourceWithStreamingResponse(self._threads.messages)
diff --git a/src/digitalocean_genai_sdk/resources/uploads.py b/src/digitalocean_genai_sdk/resources/uploads.py
deleted file mode 100644
index 30ba91b5..00000000
--- a/src/digitalocean_genai_sdk/resources/uploads.py
+++ /dev/null
@@ -1,573 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Mapping, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import upload_create_params, upload_add_part_params, upload_complete_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.upload import Upload
-from ..types.upload_add_part_response import UploadAddPartResponse
-
-__all__ = ["UploadsResource", "AsyncUploadsResource"]
-
-
-class UploadsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> UploadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return UploadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> UploadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return UploadsResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- bytes: int,
- filename: str,
- mime_type: str,
- purpose: Literal["assistants", "batch", "fine-tune", "vision"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that
- you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an
- Upload can accept at most 8 GB in total and expires after an hour after you
- create it.
-
- Once you complete the Upload, we will create a
- [File](/docs/api-reference/files/object) object that contains all the parts you
- uploaded. This File is usable in the rest of our platform as a regular File
- object.
-
- For certain `purpose` values, the correct `mime_type` must be specified. Please
- refer to documentation for the
- [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files).
-
- For guidance on the proper filename extensions for each purpose, please follow
- the documentation on [creating a File](/docs/api-reference/files/create).
-
- Args:
- bytes: The number of bytes in the file you are uploading.
-
- filename: The name of the file to upload.
-
- mime_type: The MIME type of the file.
-
- This must fall within the supported MIME types for your file purpose. See the
- supported MIME types for assistants and vision.
-
- purpose: The intended purpose of the uploaded file.
-
- See the
- [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/uploads",
- body=maybe_transform(
- {
- "bytes": bytes,
- "filename": filename,
- "mime_type": mime_type,
- "purpose": purpose,
- },
- upload_create_params.UploadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- def add_part(
- self,
- upload_id: str,
- *,
- data: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UploadAddPartResponse:
- """
- Adds a [Part](/docs/api-reference/uploads/part-object) to an
- [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk
- of bytes from the file you are trying to upload.
-
- Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
- maximum of 8 GB.
-
- It is possible to add multiple Parts in parallel. You can decide the intended
- order of the Parts when you
- [complete the Upload](/docs/api-reference/uploads/complete).
-
- Args:
- data: The chunk of bytes for this Part.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- body = deepcopy_minimal({"data": data})
- files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- f"/uploads/{upload_id}/parts",
- body=maybe_transform(body, upload_add_part_params.UploadAddPartParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UploadAddPartResponse,
- )
-
- def cancel(
- self,
- upload_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """Cancels the Upload.
-
- No Parts may be added after an Upload is cancelled.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return self._post(
- f"/uploads/{upload_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- def complete(
- self,
- upload_id: str,
- *,
- part_ids: List[str],
- md5: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Completes the [Upload](/docs/api-reference/uploads/object).
-
- Within the returned Upload object, there is a nested
- [File](/docs/api-reference/files/object) object that is ready to use in the rest
- of the platform.
-
- You can specify the order of the Parts by passing in an ordered list of the Part
- IDs.
-
- The number of bytes uploaded upon completion must match the number of bytes
- initially specified when creating the Upload object. No Parts may be added after
- an Upload is completed.
-
- Args:
- part_ids: The ordered list of Part IDs.
-
- md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
- matches what you expect.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return self._post(
- f"/uploads/{upload_id}/complete",
- body=maybe_transform(
- {
- "part_ids": part_ids,
- "md5": md5,
- },
- upload_complete_params.UploadCompleteParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
-
-class AsyncUploadsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncUploadsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncUploadsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncUploadsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncUploadsResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- bytes: int,
- filename: str,
- mime_type: str,
- purpose: Literal["assistants", "batch", "fine-tune", "vision"],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that
- you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an
- Upload can accept at most 8 GB in total and expires after an hour after you
- create it.
-
- Once you complete the Upload, we will create a
- [File](/docs/api-reference/files/object) object that contains all the parts you
- uploaded. This File is usable in the rest of our platform as a regular File
- object.
-
- For certain `purpose` values, the correct `mime_type` must be specified. Please
- refer to documentation for the
- [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files).
-
- For guidance on the proper filename extensions for each purpose, please follow
- the documentation on [creating a File](/docs/api-reference/files/create).
-
- Args:
- bytes: The number of bytes in the file you are uploading.
-
- filename: The name of the file to upload.
-
- mime_type: The MIME type of the file.
-
- This must fall within the supported MIME types for your file purpose. See the
- supported MIME types for assistants and vision.
-
- purpose: The intended purpose of the uploaded file.
-
- See the
- [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/uploads",
- body=await async_maybe_transform(
- {
- "bytes": bytes,
- "filename": filename,
- "mime_type": mime_type,
- "purpose": purpose,
- },
- upload_create_params.UploadCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- async def add_part(
- self,
- upload_id: str,
- *,
- data: FileTypes,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> UploadAddPartResponse:
- """
- Adds a [Part](/docs/api-reference/uploads/part-object) to an
- [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk
- of bytes from the file you are trying to upload.
-
- Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
- maximum of 8 GB.
-
- It is possible to add multiple Parts in parallel. You can decide the intended
- order of the Parts when you
- [complete the Upload](/docs/api-reference/uploads/complete).
-
- Args:
- data: The chunk of bytes for this Part.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- body = deepcopy_minimal({"data": data})
- files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- f"/uploads/{upload_id}/parts",
- body=await async_maybe_transform(body, upload_add_part_params.UploadAddPartParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=UploadAddPartResponse,
- )
-
- async def cancel(
- self,
- upload_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """Cancels the Upload.
-
- No Parts may be added after an Upload is cancelled.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return await self._post(
- f"/uploads/{upload_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
- async def complete(
- self,
- upload_id: str,
- *,
- part_ids: List[str],
- md5: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Upload:
- """
- Completes the [Upload](/docs/api-reference/uploads/object).
-
- Within the returned Upload object, there is a nested
- [File](/docs/api-reference/files/object) object that is ready to use in the rest
- of the platform.
-
- You can specify the order of the Parts by passing in an ordered list of the Part
- IDs.
-
- The number of bytes uploaded upon completion must match the number of bytes
- initially specified when creating the Upload object. No Parts may be added after
- an Upload is completed.
-
- Args:
- part_ids: The ordered list of Part IDs.
-
- md5: The optional md5 checksum for the file contents to verify if the bytes uploaded
- matches what you expect.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not upload_id:
- raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
- return await self._post(
- f"/uploads/{upload_id}/complete",
- body=await async_maybe_transform(
- {
- "part_ids": part_ids,
- "md5": md5,
- },
- upload_complete_params.UploadCompleteParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Upload,
- )
-
-
-class UploadsResourceWithRawResponse:
- def __init__(self, uploads: UploadsResource) -> None:
- self._uploads = uploads
-
- self.create = to_raw_response_wrapper(
- uploads.create,
- )
- self.add_part = to_raw_response_wrapper(
- uploads.add_part,
- )
- self.cancel = to_raw_response_wrapper(
- uploads.cancel,
- )
- self.complete = to_raw_response_wrapper(
- uploads.complete,
- )
-
-
-class AsyncUploadsResourceWithRawResponse:
- def __init__(self, uploads: AsyncUploadsResource) -> None:
- self._uploads = uploads
-
- self.create = async_to_raw_response_wrapper(
- uploads.create,
- )
- self.add_part = async_to_raw_response_wrapper(
- uploads.add_part,
- )
- self.cancel = async_to_raw_response_wrapper(
- uploads.cancel,
- )
- self.complete = async_to_raw_response_wrapper(
- uploads.complete,
- )
-
-
-class UploadsResourceWithStreamingResponse:
- def __init__(self, uploads: UploadsResource) -> None:
- self._uploads = uploads
-
- self.create = to_streamed_response_wrapper(
- uploads.create,
- )
- self.add_part = to_streamed_response_wrapper(
- uploads.add_part,
- )
- self.cancel = to_streamed_response_wrapper(
- uploads.cancel,
- )
- self.complete = to_streamed_response_wrapper(
- uploads.complete,
- )
-
-
-class AsyncUploadsResourceWithStreamingResponse:
- def __init__(self, uploads: AsyncUploadsResource) -> None:
- self._uploads = uploads
-
- self.create = async_to_streamed_response_wrapper(
- uploads.create,
- )
- self.add_part = async_to_streamed_response_wrapper(
- uploads.add_part,
- )
- self.cancel = async_to_streamed_response_wrapper(
- uploads.cancel,
- )
- self.complete = async_to_streamed_response_wrapper(
- uploads.complete,
- )
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py b/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py
deleted file mode 100644
index a754f147..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .files import (
- FilesResource,
- AsyncFilesResource,
- FilesResourceWithRawResponse,
- AsyncFilesResourceWithRawResponse,
- FilesResourceWithStreamingResponse,
- AsyncFilesResourceWithStreamingResponse,
-)
-from .file_batches import (
- FileBatchesResource,
- AsyncFileBatchesResource,
- FileBatchesResourceWithRawResponse,
- AsyncFileBatchesResourceWithRawResponse,
- FileBatchesResourceWithStreamingResponse,
- AsyncFileBatchesResourceWithStreamingResponse,
-)
-from .vector_stores import (
- VectorStoresResource,
- AsyncVectorStoresResource,
- VectorStoresResourceWithRawResponse,
- AsyncVectorStoresResourceWithRawResponse,
- VectorStoresResourceWithStreamingResponse,
- AsyncVectorStoresResourceWithStreamingResponse,
-)
-
-__all__ = [
- "FileBatchesResource",
- "AsyncFileBatchesResource",
- "FileBatchesResourceWithRawResponse",
- "AsyncFileBatchesResourceWithRawResponse",
- "FileBatchesResourceWithStreamingResponse",
- "AsyncFileBatchesResourceWithStreamingResponse",
- "FilesResource",
- "AsyncFilesResource",
- "FilesResourceWithRawResponse",
- "AsyncFilesResourceWithRawResponse",
- "FilesResourceWithStreamingResponse",
- "AsyncFilesResourceWithStreamingResponse",
- "VectorStoresResource",
- "AsyncVectorStoresResource",
- "VectorStoresResourceWithRawResponse",
- "AsyncVectorStoresResourceWithRawResponse",
- "VectorStoresResourceWithStreamingResponse",
- "AsyncVectorStoresResourceWithStreamingResponse",
-]
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py b/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py
deleted file mode 100644
index 0c4334ce..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py
+++ /dev/null
@@ -1,544 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.vector_stores import ChunkingStrategyRequestParam, file_batch_create_params, file_batch_list_files_params
-from ...types.vector_stores.vector_store_file_batch_object import VectorStoreFileBatchObject
-from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam
-from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse
-
-__all__ = ["FileBatchesResource", "AsyncFileBatchesResource"]
-
-
-class FileBatchesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FileBatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FileBatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FileBatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FileBatchesResourceWithStreamingResponse(self)
-
- def create(
- self,
- vector_store_id: str,
- *,
- file_ids: List[str],
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Create a vector store file batch.
-
- Args:
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/file_batches",
- body=maybe_transform(
- {
- "file_ids": file_ids,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_batch_create_params.FileBatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- def retrieve(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Retrieves a vector store file batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- def cancel(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """Cancel a vector store file batch.
-
- This attempts to cancel the processing of
- files in this batch as soon as possible.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- def list_files(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files in a batch.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_batch_list_files_params.FileBatchListFilesParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
-
-class AsyncFileBatchesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFileBatchesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFileBatchesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFileBatchesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFileBatchesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- vector_store_id: str,
- *,
- file_ids: List[str],
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Create a vector store file batch.
-
- Args:
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/file_batches",
- body=await async_maybe_transform(
- {
- "file_ids": file_ids,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_batch_create_params.FileBatchCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- async def retrieve(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """
- Retrieves a vector store file batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- async def cancel(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileBatchObject:
- """Cancel a vector store file batch.
-
- This attempts to cancel the processing of
- files in this batch as soon as possible.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatchObject,
- )
-
- async def list_files(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files in a batch.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_batch_list_files_params.FileBatchListFilesParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
-
-class FileBatchesResourceWithRawResponse:
- def __init__(self, file_batches: FileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = to_raw_response_wrapper(
- file_batches.create,
- )
- self.retrieve = to_raw_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = to_raw_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = to_raw_response_wrapper(
- file_batches.list_files,
- )
-
-
-class AsyncFileBatchesResourceWithRawResponse:
- def __init__(self, file_batches: AsyncFileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = async_to_raw_response_wrapper(
- file_batches.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = async_to_raw_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = async_to_raw_response_wrapper(
- file_batches.list_files,
- )
-
-
-class FileBatchesResourceWithStreamingResponse:
- def __init__(self, file_batches: FileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = to_streamed_response_wrapper(
- file_batches.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = to_streamed_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = to_streamed_response_wrapper(
- file_batches.list_files,
- )
-
-
-class AsyncFileBatchesResourceWithStreamingResponse:
- def __init__(self, file_batches: AsyncFileBatchesResource) -> None:
- self._file_batches = file_batches
-
- self.create = async_to_streamed_response_wrapper(
- file_batches.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- file_batches.retrieve,
- )
- self.cancel = async_to_streamed_response_wrapper(
- file_batches.cancel,
- )
- self.list_files = async_to_streamed_response_wrapper(
- file_batches.list_files,
- )
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/files.py b/src/digitalocean_genai_sdk/resources/vector_stores/files.py
deleted file mode 100644
index c40d5b11..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/files.py
+++ /dev/null
@@ -1,733 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.vector_stores import (
- ChunkingStrategyRequestParam,
- file_list_params,
- file_create_params,
- file_update_params,
-)
-from ...types.vector_stores.file_delete_response import FileDeleteResponse
-from ...types.vector_stores.vector_store_file_object import VectorStoreFileObject
-from ...types.vector_stores.file_retrieve_content_response import FileRetrieveContentResponse
-from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam
-from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse
-
-__all__ = ["FilesResource", "AsyncFilesResource"]
-
-
-class FilesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return FilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> FilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return FilesResourceWithStreamingResponse(self)
-
- def create(
- self,
- vector_store_id: str,
- *,
- file_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Create a vector store file by attaching a [File](/docs/api-reference/files) to a
- [vector store](/docs/api-reference/vector-stores/object).
-
- Args:
- file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful
- for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/files",
- body=maybe_transform(
- {
- "file_id": file_id,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_create_params.FileCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- def retrieve(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Retrieves a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- def update(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Update attributes on a vector store file.
-
- Args:
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- def list(
- self,
- vector_store_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
- def delete(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """Delete a vector store file.
-
- This will remove the file from the vector store but
- the file itself will not be deleted. To delete the file, use the
- [delete file](/docs/api-reference/files/delete) endpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._delete(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- def retrieve_content(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileRetrieveContentResponse:
- """
- Retrieve the parsed contents of a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileRetrieveContentResponse,
- )
-
-
-class AsyncFilesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFilesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncFilesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncFilesResourceWithStreamingResponse(self)
-
- async def create(
- self,
- vector_store_id: str,
- *,
- file_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
- chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Create a vector store file by attaching a [File](/docs/api-reference/files) to a
- [vector store](/docs/api-reference/vector-stores/object).
-
- Args:
- file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful
- for tools like `file_search` that can access files.
-
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/files",
- body=await async_maybe_transform(
- {
- "file_id": file_id,
- "attributes": attributes,
- "chunking_strategy": chunking_strategy,
- },
- file_create_params.FileCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- async def retrieve(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Retrieves a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- async def update(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- attributes: Optional[Dict[str, Union[str, float, bool]]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreFileObject:
- """
- Update attributes on a vector store file.
-
- Args:
- attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard. Keys are strings with a maximum
- length of 64 characters. Values are strings with a maximum length of 512
- characters, booleans, or numbers.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileObject,
- )
-
- async def list(
- self,
- vector_store_id: str,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ListVectorStoreFilesResponse:
- """
- Returns a list of vector store files.
-
- Args:
- after: A cursor for use in pagination. `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/files",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "filter": filter,
- "limit": limit,
- "order": order,
- },
- file_list_params.FileListParams,
- ),
- ),
- cast_to=ListVectorStoreFilesResponse,
- )
-
- async def delete(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileDeleteResponse:
- """Delete a vector store file.
-
- This will remove the file from the vector store but
- the file itself will not be deleted. To delete the file, use the
- [delete file](/docs/api-reference/files/delete) endpoint.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._delete(
- f"/vector_stores/{vector_store_id}/files/{file_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileDeleteResponse,
- )
-
- async def retrieve_content(
- self,
- file_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FileRetrieveContentResponse:
- """
- Retrieve the parsed contents of a vector store file.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not file_id:
- raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}/files/{file_id}/content",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FileRetrieveContentResponse,
- )
-
-
-class FilesResourceWithRawResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.create = to_raw_response_wrapper(
- files.create,
- )
- self.retrieve = to_raw_response_wrapper(
- files.retrieve,
- )
- self.update = to_raw_response_wrapper(
- files.update,
- )
- self.list = to_raw_response_wrapper(
- files.list,
- )
- self.delete = to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_raw_response_wrapper(
- files.retrieve_content,
- )
-
-
-class AsyncFilesResourceWithRawResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.create = async_to_raw_response_wrapper(
- files.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- files.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- files.update,
- )
- self.list = async_to_raw_response_wrapper(
- files.list,
- )
- self.delete = async_to_raw_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_raw_response_wrapper(
- files.retrieve_content,
- )
-
-
-class FilesResourceWithStreamingResponse:
- def __init__(self, files: FilesResource) -> None:
- self._files = files
-
- self.create = to_streamed_response_wrapper(
- files.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- files.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- files.update,
- )
- self.list = to_streamed_response_wrapper(
- files.list,
- )
- self.delete = to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = to_streamed_response_wrapper(
- files.retrieve_content,
- )
-
-
-class AsyncFilesResourceWithStreamingResponse:
- def __init__(self, files: AsyncFilesResource) -> None:
- self._files = files
-
- self.create = async_to_streamed_response_wrapper(
- files.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- files.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- files.update,
- )
- self.list = async_to_streamed_response_wrapper(
- files.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- files.delete,
- )
- self.retrieve_content = async_to_streamed_response_wrapper(
- files.retrieve_content,
- )
diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py b/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py
deleted file mode 100644
index 8ad572ea..00000000
--- a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py
+++ /dev/null
@@ -1,847 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from .files import (
- FilesResource,
- AsyncFilesResource,
- FilesResourceWithRawResponse,
- AsyncFilesResourceWithRawResponse,
- FilesResourceWithStreamingResponse,
- AsyncFilesResourceWithStreamingResponse,
-)
-from ...types import (
- vector_store_list_params,
- vector_store_create_params,
- vector_store_search_params,
- vector_store_update_params,
-)
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .file_batches import (
- FileBatchesResource,
- AsyncFileBatchesResource,
- FileBatchesResourceWithRawResponse,
- AsyncFileBatchesResourceWithRawResponse,
- FileBatchesResourceWithStreamingResponse,
- AsyncFileBatchesResourceWithStreamingResponse,
-)
-from ..._base_client import make_request_options
-from ...types.vector_store_object import VectorStoreObject
-from ...types.vector_store_list_response import VectorStoreListResponse
-from ...types.vector_store_delete_response import VectorStoreDeleteResponse
-from ...types.vector_store_search_response import VectorStoreSearchResponse
-from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-
-__all__ = ["VectorStoresResource", "AsyncVectorStoresResource"]
-
-
-class VectorStoresResource(SyncAPIResource):
- @cached_property
- def file_batches(self) -> FileBatchesResource:
- return FileBatchesResource(self._client)
-
- @cached_property
- def files(self) -> FilesResource:
- return FilesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> VectorStoresResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return VectorStoresResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> VectorStoresResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return VectorStoresResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
- expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Create a vector store.
-
- Args:
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy. Only applicable if `file_ids` is non-empty.
-
- expires_after: The expiration policy for a vector store.
-
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/vector_stores",
- body=maybe_transform(
- {
- "chunking_strategy": chunking_strategy,
- "expires_after": expires_after,
- "file_ids": file_ids,
- "metadata": metadata,
- "name": name,
- },
- vector_store_create_params.VectorStoreCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- def retrieve(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Retrieves a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._get(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- def update(
- self,
- vector_store_id: str,
- *,
- expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Modifies a vector store.
-
- Args:
- expires_after: The expiration policy for a vector store.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}",
- body=maybe_transform(
- {
- "expires_after": expires_after,
- "metadata": metadata,
- "name": name,
- },
- vector_store_update_params.VectorStoreUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreListResponse:
- """Returns a list of vector stores.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- vector_store_list_params.VectorStoreListParams,
- ),
- ),
- cast_to=VectorStoreListResponse,
- )
-
- def delete(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreDeleteResponse:
- """
- Delete a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._delete(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreDeleteResponse,
- )
-
- def search(
- self,
- vector_store_id: str,
- *,
- query: Union[str, List[str]],
- filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
- max_num_results: int | NotGiven = NOT_GIVEN,
- ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
- rewrite_query: bool | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreSearchResponse:
- """
- Search a vector store for relevant chunks based on a query and file attributes
- filter.
-
- Args:
- query: A query string for a search
-
- filters: A filter to apply based on file attributes.
-
- max_num_results: The maximum number of results to return. This number should be between 1 and 50
- inclusive.
-
- ranking_options: Ranking options for search.
-
- rewrite_query: Whether to rewrite the natural language query for vector search.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return self._post(
- f"/vector_stores/{vector_store_id}/search",
- body=maybe_transform(
- {
- "query": query,
- "filters": filters,
- "max_num_results": max_num_results,
- "ranking_options": ranking_options,
- "rewrite_query": rewrite_query,
- },
- vector_store_search_params.VectorStoreSearchParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreSearchResponse,
- )
-
-
-class AsyncVectorStoresResource(AsyncAPIResource):
- @cached_property
- def file_batches(self) -> AsyncFileBatchesResource:
- return AsyncFileBatchesResource(self._client)
-
- @cached_property
- def files(self) -> AsyncFilesResource:
- return AsyncFilesResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncVectorStoresResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
- """
- return AsyncVectorStoresResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncVectorStoresResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
- """
- return AsyncVectorStoresResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
- expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
- file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Create a vector store.
-
- Args:
- chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
- strategy. Only applicable if `file_ids` is non-empty.
-
- expires_after: The expiration policy for a vector store.
-
- file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should
- use. Useful for tools like `file_search` that can access files.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/vector_stores",
- body=await async_maybe_transform(
- {
- "chunking_strategy": chunking_strategy,
- "expires_after": expires_after,
- "file_ids": file_ids,
- "metadata": metadata,
- "name": name,
- },
- vector_store_create_params.VectorStoreCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- async def retrieve(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Retrieves a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._get(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- async def update(
- self,
- vector_store_id: str,
- *,
- expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreObject:
- """
- Modifies a vector store.
-
- Args:
- expires_after: The expiration policy for a vector store.
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- name: The name of the vector store.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}",
- body=await async_maybe_transform(
- {
- "expires_after": expires_after,
- "metadata": metadata,
- "name": name,
- },
- vector_store_update_params.VectorStoreUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreObject,
- )
-
- async def list(
- self,
- *,
- after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreListResponse:
- """Returns a list of vector stores.
-
- Args:
- after: A cursor for use in pagination.
-
- `after` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include after=obj_foo in order to
- fetch the next page of the list.
-
- before: A cursor for use in pagination. `before` is an object ID that defines your place
- in the list. For instance, if you make a list request and receive 100 objects,
- starting with obj_foo, your subsequent call can include before=obj_foo in order
- to fetch the previous page of the list.
-
- limit: A limit on the number of objects to be returned. Limit can range between 1 and
- 100, and the default is 20.
-
- order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- order and `desc` for descending order.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/vector_stores",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "after": after,
- "before": before,
- "limit": limit,
- "order": order,
- },
- vector_store_list_params.VectorStoreListParams,
- ),
- ),
- cast_to=VectorStoreListResponse,
- )
-
- async def delete(
- self,
- vector_store_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreDeleteResponse:
- """
- Delete a vector store.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._delete(
- f"/vector_stores/{vector_store_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreDeleteResponse,
- )
-
- async def search(
- self,
- vector_store_id: str,
- *,
- query: Union[str, List[str]],
- filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN,
- max_num_results: int | NotGiven = NOT_GIVEN,
- ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN,
- rewrite_query: bool | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> VectorStoreSearchResponse:
- """
- Search a vector store for relevant chunks based on a query and file attributes
- filter.
-
- Args:
- query: A query string for a search
-
- filters: A filter to apply based on file attributes.
-
- max_num_results: The maximum number of results to return. This number should be between 1 and 50
- inclusive.
-
- ranking_options: Ranking options for search.
-
- rewrite_query: Whether to rewrite the natural language query for vector search.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- return await self._post(
- f"/vector_stores/{vector_store_id}/search",
- body=await async_maybe_transform(
- {
- "query": query,
- "filters": filters,
- "max_num_results": max_num_results,
- "ranking_options": ranking_options,
- "rewrite_query": rewrite_query,
- },
- vector_store_search_params.VectorStoreSearchParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreSearchResponse,
- )
-
-
-class VectorStoresResourceWithRawResponse:
- def __init__(self, vector_stores: VectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = to_raw_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = to_raw_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = to_raw_response_wrapper(
- vector_stores.update,
- )
- self.list = to_raw_response_wrapper(
- vector_stores.list,
- )
- self.delete = to_raw_response_wrapper(
- vector_stores.delete,
- )
- self.search = to_raw_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> FileBatchesResourceWithRawResponse:
- return FileBatchesResourceWithRawResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> FilesResourceWithRawResponse:
- return FilesResourceWithRawResponse(self._vector_stores.files)
-
-
-class AsyncVectorStoresResourceWithRawResponse:
- def __init__(self, vector_stores: AsyncVectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = async_to_raw_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- vector_stores.update,
- )
- self.list = async_to_raw_response_wrapper(
- vector_stores.list,
- )
- self.delete = async_to_raw_response_wrapper(
- vector_stores.delete,
- )
- self.search = async_to_raw_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> AsyncFileBatchesResourceWithRawResponse:
- return AsyncFileBatchesResourceWithRawResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> AsyncFilesResourceWithRawResponse:
- return AsyncFilesResourceWithRawResponse(self._vector_stores.files)
-
-
-class VectorStoresResourceWithStreamingResponse:
- def __init__(self, vector_stores: VectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = to_streamed_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- vector_stores.update,
- )
- self.list = to_streamed_response_wrapper(
- vector_stores.list,
- )
- self.delete = to_streamed_response_wrapper(
- vector_stores.delete,
- )
- self.search = to_streamed_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> FileBatchesResourceWithStreamingResponse:
- return FileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> FilesResourceWithStreamingResponse:
- return FilesResourceWithStreamingResponse(self._vector_stores.files)
-
-
-class AsyncVectorStoresResourceWithStreamingResponse:
- def __init__(self, vector_stores: AsyncVectorStoresResource) -> None:
- self._vector_stores = vector_stores
-
- self.create = async_to_streamed_response_wrapper(
- vector_stores.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- vector_stores.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- vector_stores.update,
- )
- self.list = async_to_streamed_response_wrapper(
- vector_stores.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- vector_stores.delete,
- )
- self.search = async_to_streamed_response_wrapper(
- vector_stores.search,
- )
-
- @cached_property
- def file_batches(self) -> AsyncFileBatchesResourceWithStreamingResponse:
- return AsyncFileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches)
-
- @cached_property
- def files(self) -> AsyncFilesResourceWithStreamingResponse:
- return AsyncFilesResourceWithStreamingResponse(self._vector_stores.files)
diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py
deleted file mode 100644
index 49c8d424..00000000
--- a/src/digitalocean_genai_sdk/types/__init__.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .batch import Batch as Batch
-from .model import Model as Model
-from .upload import Upload as Upload
-from .response import Response as Response
-from .includable import Includable as Includable
-from .openai_file import OpenAIFile as OpenAIFile
-from .input_content import InputContent as InputContent
-from .input_message import InputMessage as InputMessage
-from .thread_object import ThreadObject as ThreadObject
-from .output_message import OutputMessage as OutputMessage
-from .reasoning_item import ReasoningItem as ReasoningItem
-from .usage_response import UsageResponse as UsageResponse
-from .compound_filter import CompoundFilter as CompoundFilter
-from .function_object import FunctionObject as FunctionObject
-from .images_response import ImagesResponse as ImagesResponse
-from .assistant_object import AssistantObject as AssistantObject
-from .file_list_params import FileListParams as FileListParams
-from .reasoning_effort import ReasoningEffort as ReasoningEffort
-from .voice_ids_shared import VoiceIDsShared as VoiceIDsShared
-from .batch_list_params import BatchListParams as BatchListParams
-from .comparison_filter import ComparisonFilter as ComparisonFilter
-from .computer_tool_call import ComputerToolCall as ComputerToolCall
-from .file_list_response import FileListResponse as FileListResponse
-from .file_search_ranker import FileSearchRanker as FileSearchRanker
-from .file_upload_params import FileUploadParams as FileUploadParams
-from .function_tool_call import FunctionToolCall as FunctionToolCall
-from .batch_create_params import BatchCreateParams as BatchCreateParams
-from .batch_list_response import BatchListResponse as BatchListResponse
-from .input_content_param import InputContentParam as InputContentParam
-from .input_message_param import InputMessageParam as InputMessageParam
-from .model_list_response import ModelListResponse as ModelListResponse
-from .response_properties import ResponseProperties as ResponseProperties
-from .vector_store_object import VectorStoreObject as VectorStoreObject
-from .assistant_tools_code import AssistantToolsCode as AssistantToolsCode
-from .audit_log_actor_user import AuditLogActorUser as AuditLogActorUser
-from .audit_log_event_type import AuditLogEventType as AuditLogEventType
-from .file_delete_response import FileDeleteResponse as FileDeleteResponse
-from .output_message_param import OutputMessageParam as OutputMessageParam
-from .reasoning_item_param import ReasoningItemParam as ReasoningItemParam
-from .thread_create_params import ThreadCreateParams as ThreadCreateParams
-from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams
-from .upload_create_params import UploadCreateParams as UploadCreateParams
-from .web_search_tool_call import WebSearchToolCall as WebSearchToolCall
-from .assistant_list_params import AssistantListParams as AssistantListParams
-from .compound_filter_param import CompoundFilterParam as CompoundFilterParam
-from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall
-from .function_object_param import FunctionObjectParam as FunctionObjectParam
-from .model_delete_response import ModelDeleteResponse as ModelDeleteResponse
-from .transcription_segment import TranscriptionSegment as TranscriptionSegment
-from .response_create_params import ResponseCreateParams as ResponseCreateParams
-from .thread_delete_response import ThreadDeleteResponse as ThreadDeleteResponse
-from .upload_add_part_params import UploadAddPartParams as UploadAddPartParams
-from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
-from .voice_ids_shared_param import VoiceIDsSharedParam as VoiceIDsSharedParam
-from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
-from .assistant_list_response import AssistantListResponse as AssistantListResponse
-from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
-from .comparison_filter_param import ComparisonFilterParam as ComparisonFilterParam
-from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
-from .assistant_tools_function import AssistantToolsFunction as AssistantToolsFunction
-from .completion_create_params import CompletionCreateParams as CompletionCreateParams
-from .computer_tool_call_param import ComputerToolCallParam as ComputerToolCallParam
-from .function_tool_call_param import FunctionToolCallParam as FunctionToolCallParam
-from .image_create_edit_params import ImageCreateEditParams as ImageCreateEditParams
-from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams
-from .static_chunking_strategy import StaticChunkingStrategy as StaticChunkingStrategy
-from .stop_configuration_param import StopConfigurationParam as StopConfigurationParam
-from .upload_add_part_response import UploadAddPartResponse as UploadAddPartResponse
-from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
-from .assistant_delete_response import AssistantDeleteResponse as AssistantDeleteResponse
-from .computer_tool_call_output import ComputerToolCallOutput as ComputerToolCallOutput
-from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
-from .function_tool_call_output import FunctionToolCallOutput as FunctionToolCallOutput
-from .model_response_properties import ModelResponseProperties as ModelResponseProperties
-from .assistant_supported_models import AssistantSupportedModels as AssistantSupportedModels
-from .assistant_tools_code_param import AssistantToolsCodeParam as AssistantToolsCodeParam
-from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
-from .moderation_classify_params import ModerationClassifyParams as ModerationClassifyParams
-from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
-from .vector_store_list_response import VectorStoreListResponse as VectorStoreListResponse
-from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
-from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
-from .web_search_tool_call_param import WebSearchToolCallParam as WebSearchToolCallParam
-from .assistant_tools_file_search import AssistantToolsFileSearch as AssistantToolsFileSearch
-from .create_thread_request_param import CreateThreadRequestParam as CreateThreadRequestParam
-from .file_search_tool_call_param import FileSearchToolCallParam as FileSearchToolCallParam
-from .audio_generate_speech_params import AudioGenerateSpeechParams as AudioGenerateSpeechParams
-from .audio_translate_audio_params import AudioTranslateAudioParams as AudioTranslateAudioParams
-from .moderation_classify_response import ModerationClassifyResponse as ModerationClassifyResponse
-from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse
-from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
-from .audio_transcribe_audio_params import AudioTranscribeAudioParams as AudioTranscribeAudioParams
-from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
-from .organization_get_costs_params import OrganizationGetCostsParams as OrganizationGetCostsParams
-from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter
-from .assistant_tools_function_param import AssistantToolsFunctionParam as AssistantToolsFunctionParam
-from .audio_translate_audio_response import AudioTranslateAudioResponse as AudioTranslateAudioResponse
-from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse
-from .image_create_generation_params import ImageCreateGenerationParams as ImageCreateGenerationParams
-from .realtime_create_session_params import RealtimeCreateSessionParams as RealtimeCreateSessionParams
-from .static_chunking_strategy_param import StaticChunkingStrategyParam as StaticChunkingStrategyParam
-from .audio_transcribe_audio_response import AudioTranscribeAudioResponse as AudioTranscribeAudioResponse
-from .computer_tool_call_output_param import ComputerToolCallOutputParam as ComputerToolCallOutputParam
-from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck as ComputerToolCallSafetyCheck
-from .function_tool_call_output_param import FunctionToolCallOutputParam as FunctionToolCallOutputParam
-from .realtime_create_session_response import RealtimeCreateSessionResponse as RealtimeCreateSessionResponse
-from .response_list_input_items_params import ResponseListInputItemsParams as ResponseListInputItemsParams
-from .assistant_tools_file_search_param import AssistantToolsFileSearchParam as AssistantToolsFileSearchParam
-from .response_list_input_items_response import ResponseListInputItemsResponse as ResponseListInputItemsResponse
-from .organization_list_audit_logs_params import OrganizationListAuditLogsParams as OrganizationListAuditLogsParams
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam
-from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam as AutoChunkingStrategyRequestParam
-from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam
-from .assistants_api_response_format_option import (
- AssistantsAPIResponseFormatOption as AssistantsAPIResponseFormatOption,
-)
-from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam as ComputerToolCallSafetyCheckParam
-from .organization_list_audit_logs_response import (
- OrganizationListAuditLogsResponse as OrganizationListAuditLogsResponse,
-)
-from .static_chunking_strategy_request_param import (
- StaticChunkingStrategyRequestParam as StaticChunkingStrategyRequestParam,
-)
-from .assistants_api_response_format_option_param import (
- AssistantsAPIResponseFormatOptionParam as AssistantsAPIResponseFormatOptionParam,
-)
-from .realtime_create_transcription_session_params import (
- RealtimeCreateTranscriptionSessionParams as RealtimeCreateTranscriptionSessionParams,
-)
-from .realtime_create_transcription_session_response import (
- RealtimeCreateTranscriptionSessionResponse as RealtimeCreateTranscriptionSessionResponse,
-)
diff --git a/src/digitalocean_genai_sdk/types/assistant_create_params.py b/src/digitalocean_genai_sdk/types/assistant_create_params.py
deleted file mode 100644
index b89e4742..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_create_params.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .reasoning_effort import ReasoningEffort
-from .assistant_supported_models import AssistantSupportedModels
-from .assistant_tools_code_param import AssistantToolsCodeParam
-from .assistant_tools_function_param import AssistantToolsFunctionParam
-from .assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = [
- "AssistantCreateParams",
- "ToolResources",
- "ToolResourcesCodeInterpreter",
- "ToolResourcesFileSearch",
- "ToolResourcesFileSearchVectorStore",
- "ToolResourcesFileSearchVectorStoreChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic",
- "Tool",
-]
-
-
-class AssistantCreateParams(TypedDict, total=False):
- model: Required[Union[str, AssistantSupportedModels]]
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- description: Optional[str]
- """The description of the assistant. The maximum length is 512 characters."""
-
- instructions: Optional[str]
- """The system instructions that the assistant uses.
-
- The maximum length is 256,000 characters.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: Optional[str]
- """The name of the assistant. The maximum length is 256 characters."""
-
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_resources: Optional[ToolResources]
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- tools: Iterable[Tool]
- """A list of tool enabled on the assistant.
-
- There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `file_search`, or `function`.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False):
- static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic]
-
- type: Required[Literal["static"]]
- """Always `static`."""
-
-
-ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
- ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy,
- ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy,
-]
-
-
-class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
- chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
-
- file_ids: List[str]
- """A list of [file](/docs/api-reference/files) IDs to add to the vector store.
-
- There can be a maximum of 10000 files in a vector store.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- assistant. There can be a maximum of 1 vector store attached to the assistant.
- """
-
- vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
- """
- A helper to create a [vector store](/docs/api-reference/vector-stores/object)
- with file_ids and attach it to this assistant. There can be a maximum of 1
- vector store attached to the assistant.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/assistant_delete_response.py b/src/digitalocean_genai_sdk/types/assistant_delete_response.py
deleted file mode 100644
index 04207049..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["AssistantDeleteResponse"]
-
-
-class AssistantDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["assistant.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/assistant_list_params.py b/src/digitalocean_genai_sdk/types/assistant_list_params.py
deleted file mode 100644
index 834ffbca..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_list_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["AssistantListParams"]
-
-
-class AssistantListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/assistant_list_response.py b/src/digitalocean_genai_sdk/types/assistant_list_response.py
deleted file mode 100644
index dfc90bfa..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .assistant_object import AssistantObject
-
-__all__ = ["AssistantListResponse"]
-
-
-class AssistantListResponse(BaseModel):
- data: List[AssistantObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/assistant_object.py b/src/digitalocean_genai_sdk/types/assistant_object.py
deleted file mode 100644
index 4aa71ab9..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_object.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-from .assistant_tools_code import AssistantToolsCode
-from .assistant_tools_function import AssistantToolsFunction
-from .assistant_tools_file_search import AssistantToolsFileSearch
-from .assistants_api_response_format_option import AssistantsAPIResponseFormatOption
-
-__all__ = ["AssistantObject", "Tool", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
-
-Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction]
-
-
-class ToolResourcesCodeInterpreter(BaseModel):
- file_ids: Optional[List[str]] = None
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter`` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(BaseModel):
- vector_store_ids: Optional[List[str]] = None
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) attached
- to this assistant. There can be a maximum of 1 vector store attached to the
- assistant.
- """
-
-
-class ToolResources(BaseModel):
- code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
-
- file_search: Optional[ToolResourcesFileSearch] = None
-
-
-class AssistantObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the assistant was created."""
-
- description: Optional[str] = None
- """The description of the assistant. The maximum length is 512 characters."""
-
- instructions: Optional[str] = None
- """The system instructions that the assistant uses.
-
- The maximum length is 256,000 characters.
- """
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: str
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- name: Optional[str] = None
- """The name of the assistant. The maximum length is 256 characters."""
-
- object: Literal["assistant"]
- """The object type, which is always `assistant`."""
-
- tools: List[Tool]
- """A list of tool enabled on the assistant.
-
- There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `file_search`, or `function`.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOption] = None
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- temperature: Optional[float] = None
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_resources: Optional[ToolResources] = None
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- top_p: Optional[float] = None
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
diff --git a/src/digitalocean_genai_sdk/types/assistant_supported_models.py b/src/digitalocean_genai_sdk/types/assistant_supported_models.py
deleted file mode 100644
index 999b7f23..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_supported_models.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["AssistantSupportedModels"]
-
-AssistantSupportedModels: TypeAlias = Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
-]
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code.py b/src/digitalocean_genai_sdk/types/assistant_tools_code.py
deleted file mode 100644
index 73a40a71..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_code.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["AssistantToolsCode"]
-
-
-class AssistantToolsCode(BaseModel):
- type: Literal["code_interpreter"]
- """The type of tool being defined: `code_interpreter`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py
deleted file mode 100644
index 01420dda..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["AssistantToolsCodeParam"]
-
-
-class AssistantToolsCodeParam(TypedDict, total=False):
- type: Required[Literal["code_interpreter"]]
- """The type of tool being defined: `code_interpreter`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py
deleted file mode 100644
index 3c834718..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .file_search_ranker import FileSearchRanker
-
-__all__ = ["AssistantToolsFileSearch", "FileSearch", "FileSearchRankingOptions"]
-
-
-class FileSearchRankingOptions(BaseModel):
- score_threshold: float
- """The score threshold for the file search.
-
- All values must be a floating point number between 0 and 1.
- """
-
- ranker: Optional[FileSearchRanker] = None
- """The ranker to use for the file search.
-
- If not specified will use the `auto` ranker.
- """
-
-
-class FileSearch(BaseModel):
- max_num_results: Optional[int] = None
- """The maximum number of results the file search tool should output.
-
- The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
- should be between 1 and 50 inclusive.
-
- Note that the file search tool may output fewer than `max_num_results` results.
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- ranking_options: Optional[FileSearchRankingOptions] = None
- """The ranking options for the file search.
-
- If not specified, the file search tool will use the `auto` ranker and a
- score_threshold of 0.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
-
-class AssistantToolsFileSearch(BaseModel):
- type: Literal["file_search"]
- """The type of tool being defined: `file_search`"""
-
- file_search: Optional[FileSearch] = None
- """Overrides for the file search tool."""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py
deleted file mode 100644
index 3f0e5af4..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .file_search_ranker import FileSearchRanker
-
-__all__ = ["AssistantToolsFileSearchParam", "FileSearch", "FileSearchRankingOptions"]
-
-
-class FileSearchRankingOptions(TypedDict, total=False):
- score_threshold: Required[float]
- """The score threshold for the file search.
-
- All values must be a floating point number between 0 and 1.
- """
-
- ranker: FileSearchRanker
- """The ranker to use for the file search.
-
- If not specified will use the `auto` ranker.
- """
-
-
-class FileSearch(TypedDict, total=False):
- max_num_results: int
- """The maximum number of results the file search tool should output.
-
- The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
- should be between 1 and 50 inclusive.
-
- Note that the file search tool may output fewer than `max_num_results` results.
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- ranking_options: FileSearchRankingOptions
- """The ranking options for the file search.
-
- If not specified, the file search tool will use the `auto` ranker and a
- score_threshold of 0.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
-
-class AssistantToolsFileSearchParam(TypedDict, total=False):
- type: Required[Literal["file_search"]]
- """The type of tool being defined: `file_search`"""
-
- file_search: FileSearch
- """Overrides for the file search tool."""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function.py b/src/digitalocean_genai_sdk/types/assistant_tools_function.py
deleted file mode 100644
index 89326d54..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_function.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .function_object import FunctionObject
-
-__all__ = ["AssistantToolsFunction"]
-
-
-class AssistantToolsFunction(BaseModel):
- function: FunctionObject
-
- type: Literal["function"]
- """The type of tool being defined: `function`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py
deleted file mode 100644
index 4e9ecf3d..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .function_object_param import FunctionObjectParam
-
-__all__ = ["AssistantToolsFunctionParam"]
-
-
-class AssistantToolsFunctionParam(TypedDict, total=False):
- function: Required[FunctionObjectParam]
-
- type: Required[Literal["function"]]
- """The type of tool being defined: `function`"""
diff --git a/src/digitalocean_genai_sdk/types/assistant_update_params.py b/src/digitalocean_genai_sdk/types/assistant_update_params.py
deleted file mode 100644
index cf301dd4..00000000
--- a/src/digitalocean_genai_sdk/types/assistant_update_params.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import TypeAlias, TypedDict
-
-from .reasoning_effort import ReasoningEffort
-from .assistant_supported_models import AssistantSupportedModels
-from .assistant_tools_code_param import AssistantToolsCodeParam
-from .assistant_tools_function_param import AssistantToolsFunctionParam
-from .assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"]
-
-
-class AssistantUpdateParams(TypedDict, total=False):
- description: Optional[str]
- """The description of the assistant. The maximum length is 512 characters."""
-
- instructions: Optional[str]
- """The system instructions that the assistant uses.
-
- The maximum length is 256,000 characters.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: Union[str, AssistantSupportedModels]
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- name: Optional[str]
- """The name of the assistant. The maximum length is 256 characters."""
-
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_resources: Optional[ToolResources]
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- tools: Iterable[Tool]
- """A list of tool enabled on the assistant.
-
- There can be a maximum of 128 tools per assistant. Tools can be of types
- `code_interpreter`, `file_search`, or `function`.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- Overrides the list of [file](/docs/api-reference/files) IDs made available to
- the `code_interpreter` tool. There can be a maximum of 20 files associated with
- the tool.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- Overrides the [vector store](/docs/api-reference/vector-stores/object) attached
- to this assistant. There can be a maximum of 1 vector store attached to the
- assistant.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py
deleted file mode 100644
index 07c4f71e..00000000
--- a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-from .chat.response_format_text import ResponseFormatText
-from .chat.response_format_json_object import ResponseFormatJsonObject
-from .chat.response_format_json_schema import ResponseFormatJsonSchema
-
-__all__ = ["AssistantsAPIResponseFormatOption"]
-
-AssistantsAPIResponseFormatOption: TypeAlias = Union[
- Literal["auto"], ResponseFormatText, ResponseFormatJsonObject, ResponseFormatJsonSchema
-]
diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py
deleted file mode 100644
index 7dbf967f..00000000
--- a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-from .chat.response_format_text_param import ResponseFormatTextParam
-from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam
-from .chat.response_format_json_schema_param import ResponseFormatJsonSchemaParam
-
-__all__ = ["AssistantsAPIResponseFormatOptionParam"]
-
-AssistantsAPIResponseFormatOptionParam: TypeAlias = Union[
- Literal["auto"], ResponseFormatTextParam, ResponseFormatJsonObjectParam, ResponseFormatJsonSchemaParam
-]
diff --git a/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py b/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py
deleted file mode 100644
index 8857594a..00000000
--- a/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
-
-from .voice_ids_shared_param import VoiceIDsSharedParam
-
-__all__ = ["AudioGenerateSpeechParams"]
-
-
-class AudioGenerateSpeechParams(TypedDict, total=False):
- input: Required[str]
- """The text to generate audio for. The maximum length is 4096 characters."""
-
- model: Required[Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]]]
- """
- One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or
- `gpt-4o-mini-tts`.
- """
-
- voice: Required[VoiceIDsSharedParam]
- """The voice to use when generating the audio.
-
- Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`,
- `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in
- the [Text to speech guide](/docs/guides/text-to-speech#voice-options).
- """
-
- instructions: str
- """Control the voice of your generated audio with additional instructions.
-
- Does not work with `tts-1` or `tts-1-hd`.
- """
-
- response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
- """The format to audio in.
-
- Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
- """
-
- speed: float
- """The speed of the generated audio.
-
- Select a value from `0.25` to `4.0`. `1.0` is the default.
- """
diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py
deleted file mode 100644
index cbc15157..00000000
--- a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["AudioTranscribeAudioParams"]
-
-
-class AudioTranscribeAudioParams(TypedDict, total=False):
- file: Required[FileTypes]
- """
- The audio file object (not file name) to transcribe, in one of these formats:
- flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- """
-
- model: Required[Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]]]
- """ID of the model to use.
-
- The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1`
- (which is powered by our open source Whisper V2 model).
- """
-
- include: List[Literal["logprobs"]]
- """Additional information to include in the transcription response.
-
- `logprobs` will return the log probabilities of the tokens in the response to
- understand the model's confidence in the transcription. `logprobs` only works
- with response_format set to `json` and only with the models `gpt-4o-transcribe`
- and `gpt-4o-mini-transcribe`.
- """
-
- language: str
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- prompt: str
- """An optional text to guide the model's style or continue a previous audio
- segment.
-
- The [prompt](/docs/guides/speech-to-text#prompting) should match the audio
- language.
- """
-
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
- """
- The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
- the only supported format is `json`.
- """
-
- stream: Optional[bool]
- """
- If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the
- [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
- for more information.
-
- Note: Streaming is not supported for the `whisper-1` model and will be ignored.
- """
-
- temperature: float
- """The sampling temperature, between 0 and 1.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
- """
-
- timestamp_granularities: List[Literal["word", "segment"]]
- """The timestamp granularities to populate for this transcription.
-
- `response_format` must be set `verbose_json` to use timestamp granularities.
- Either or both of these options are supported: `word`, or `segment`. Note: There
- is no additional latency for segment timestamps, but generating word timestamps
- incurs additional latency.
- """
diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py
deleted file mode 100644
index 54b999ed..00000000
--- a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import TypeAlias
-
-from .._models import BaseModel
-from .transcription_segment import TranscriptionSegment
-
-__all__ = [
- "AudioTranscribeAudioResponse",
- "CreateTranscriptionResponseJson",
- "CreateTranscriptionResponseJsonLogprob",
- "CreateTranscriptionResponseVerboseJson",
- "CreateTranscriptionResponseVerboseJsonWord",
-]
-
-
-class CreateTranscriptionResponseJsonLogprob(BaseModel):
- token: str
- """The token that was used to generate the log probability."""
-
- bytes: List[int]
- """The bytes that were used to generate the log probability."""
-
- logprob: float
- """The log probability of the token."""
-
-
-class CreateTranscriptionResponseJson(BaseModel):
- text: str
- """The transcribed text."""
-
- logprobs: Optional[List[CreateTranscriptionResponseJsonLogprob]] = None
- """The log probabilities of the tokens in the transcription.
-
- Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`
- if `logprobs` is added to the `include` array.
- """
-
-
-class CreateTranscriptionResponseVerboseJsonWord(BaseModel):
- end: float
- """End time of the word in seconds."""
-
- start: float
- """Start time of the word in seconds."""
-
- word: str
- """The text content of the word."""
-
-
-class CreateTranscriptionResponseVerboseJson(BaseModel):
- duration: float
- """The duration of the input audio."""
-
- language: str
- """The language of the input audio."""
-
- text: str
- """The transcribed text."""
-
- segments: Optional[List[TranscriptionSegment]] = None
- """Segments of the transcribed text and their corresponding details."""
-
- words: Optional[List[CreateTranscriptionResponseVerboseJsonWord]] = None
- """Extracted words and their corresponding timestamps."""
-
-
-AudioTranscribeAudioResponse: TypeAlias = Union[CreateTranscriptionResponseJson, CreateTranscriptionResponseVerboseJson]
diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py
deleted file mode 100644
index cc222f14..00000000
--- a/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["AudioTranslateAudioParams"]
-
-
-class AudioTranslateAudioParams(TypedDict, total=False):
- file: Required[FileTypes]
- """
- The audio file object (not file name) translate, in one of these formats: flac,
- mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
- """
-
- model: Required[Union[str, Literal["whisper-1"]]]
- """ID of the model to use.
-
- Only `whisper-1` (which is powered by our open source Whisper V2 model) is
- currently available.
- """
-
- prompt: str
- """An optional text to guide the model's style or continue a previous audio
- segment.
-
- The [prompt](/docs/guides/speech-to-text#prompting) should be in English.
- """
-
- response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
- """
- The format of the output, in one of these options: `json`, `text`, `srt`,
- `verbose_json`, or `vtt`.
- """
-
- temperature: float
- """The sampling temperature, between 0 and 1.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. If set to 0, the model will use
- [log probability](https://en.wikipedia.org/wiki/Log_probability) to
- automatically increase the temperature until certain thresholds are hit.
- """
diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py
deleted file mode 100644
index 74d08a73..00000000
--- a/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import TypeAlias
-
-from .._models import BaseModel
-from .transcription_segment import TranscriptionSegment
-
-__all__ = ["AudioTranslateAudioResponse", "CreateTranslationResponseJson", "CreateTranslationResponseVerboseJson"]
-
-
-class CreateTranslationResponseJson(BaseModel):
- text: str
-
-
-class CreateTranslationResponseVerboseJson(BaseModel):
- duration: float
- """The duration of the input audio."""
-
- language: str
- """The language of the output translation (always `english`)."""
-
- text: str
- """The translated text."""
-
- segments: Optional[List[TranscriptionSegment]] = None
- """Segments of the translated text and their corresponding details."""
-
-
-AudioTranslateAudioResponse: TypeAlias = Union[CreateTranslationResponseJson, CreateTranslationResponseVerboseJson]
diff --git a/src/digitalocean_genai_sdk/types/audit_log_actor_user.py b/src/digitalocean_genai_sdk/types/audit_log_actor_user.py
deleted file mode 100644
index f3da325d..00000000
--- a/src/digitalocean_genai_sdk/types/audit_log_actor_user.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["AuditLogActorUser"]
-
-
-class AuditLogActorUser(BaseModel):
- id: Optional[str] = None
- """The user id."""
-
- email: Optional[str] = None
- """The user email."""
diff --git a/src/digitalocean_genai_sdk/types/audit_log_event_type.py b/src/digitalocean_genai_sdk/types/audit_log_event_type.py
deleted file mode 100644
index 2031cbb8..00000000
--- a/src/digitalocean_genai_sdk/types/audit_log_event_type.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["AuditLogEventType"]
-
-AuditLogEventType: TypeAlias = Literal[
- "api_key.created",
- "api_key.updated",
- "api_key.deleted",
- "invite.sent",
- "invite.accepted",
- "invite.deleted",
- "login.succeeded",
- "login.failed",
- "logout.succeeded",
- "logout.failed",
- "organization.updated",
- "project.created",
- "project.updated",
- "project.archived",
- "service_account.created",
- "service_account.updated",
- "service_account.deleted",
- "rate_limit.updated",
- "rate_limit.deleted",
- "user.added",
- "user.updated",
- "user.deleted",
-]
diff --git a/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py
deleted file mode 100644
index 5c0c131e..00000000
--- a/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["AutoChunkingStrategyRequestParam"]
-
-
-class AutoChunkingStrategyRequestParam(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
diff --git a/src/digitalocean_genai_sdk/types/batch.py b/src/digitalocean_genai_sdk/types/batch.py
deleted file mode 100644
index 1fdd6928..00000000
--- a/src/digitalocean_genai_sdk/types/batch.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["Batch", "Errors", "ErrorsData", "RequestCounts"]
-
-
-class ErrorsData(BaseModel):
- code: Optional[str] = None
- """An error code identifying the error type."""
-
- line: Optional[int] = None
- """The line number of the input file where the error occurred, if applicable."""
-
- message: Optional[str] = None
- """A human-readable message providing more details about the error."""
-
- param: Optional[str] = None
- """The name of the parameter that caused the error, if applicable."""
-
-
-class Errors(BaseModel):
- data: Optional[List[ErrorsData]] = None
-
- object: Optional[str] = None
- """The object type, which is always `list`."""
-
-
-class RequestCounts(BaseModel):
- completed: int
- """Number of requests that have been completed successfully."""
-
- failed: int
- """Number of requests that have failed."""
-
- total: int
- """Total number of requests in the batch."""
-
-
-class Batch(BaseModel):
- id: str
-
- completion_window: str
- """The time frame within which the batch should be processed."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the batch was created."""
-
- endpoint: str
- """The OpenAI API endpoint used by the batch."""
-
- input_file_id: str
- """The ID of the input file for the batch."""
-
- object: Literal["batch"]
- """The object type, which is always `batch`."""
-
- status: Literal[
- "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
- ]
- """The current status of the batch."""
-
- cancelled_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch was cancelled."""
-
- cancelling_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch started cancelling."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch was completed."""
-
- error_file_id: Optional[str] = None
- """The ID of the file containing the outputs of requests with errors."""
-
- errors: Optional[Errors] = None
-
- expired_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch expired."""
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch will expire."""
-
- failed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch failed."""
-
- finalizing_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch started finalizing."""
-
- in_progress_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the batch started processing."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- output_file_id: Optional[str] = None
- """The ID of the file containing the outputs of successfully executed requests."""
-
- request_counts: Optional[RequestCounts] = None
- """The request counts for different statuses within the batch."""
diff --git a/src/digitalocean_genai_sdk/types/batch_create_params.py b/src/digitalocean_genai_sdk/types/batch_create_params.py
deleted file mode 100644
index 08243244..00000000
--- a/src/digitalocean_genai_sdk/types/batch_create_params.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["BatchCreateParams"]
-
-
-class BatchCreateParams(TypedDict, total=False):
- completion_window: Required[Literal["24h"]]
- """The time frame within which the batch should be processed.
-
- Currently only `24h` is supported.
- """
-
- endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
- """The endpoint to be used for all requests in the batch.
-
- Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
- `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
- restricted to a maximum of 50,000 embedding inputs across all requests in the
- batch.
- """
-
- input_file_id: Required[str]
- """The ID of an uploaded file that contains requests for the new batch.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your input file must be formatted as a
- [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with
- the purpose `batch`. The file can contain up to 50,000 requests, and can be up
- to 200 MB in size.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/batch_list_params.py b/src/digitalocean_genai_sdk/types/batch_list_params.py
deleted file mode 100644
index ef5e966b..00000000
--- a/src/digitalocean_genai_sdk/types/batch_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["BatchListParams"]
-
-
-class BatchListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/batch_list_response.py b/src/digitalocean_genai_sdk/types/batch_list_response.py
deleted file mode 100644
index 87c4f9b8..00000000
--- a/src/digitalocean_genai_sdk/types/batch_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .batch import Batch
-from .._models import BaseModel
-
-__all__ = ["BatchListResponse"]
-
-
-class BatchListResponse(BaseModel):
- data: List[Batch]
-
- has_more: bool
-
- object: Literal["list"]
-
- first_id: Optional[str] = None
-
- last_id: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/chat/__init__.py b/src/digitalocean_genai_sdk/types/chat/__init__.py
deleted file mode 100644
index cfa8c56a..00000000
--- a/src/digitalocean_genai_sdk/types/chat/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .usage import Usage as Usage
-from .token_logprob import TokenLogprob as TokenLogprob
-from .create_response import CreateResponse as CreateResponse
-from .response_message import ResponseMessage as ResponseMessage
-from .message_tool_call import MessageToolCall as MessageToolCall
-from .web_search_location import WebSearchLocation as WebSearchLocation
-from .response_format_text import ResponseFormatText as ResponseFormatText
-from .completion_list_params import CompletionListParams as CompletionListParams
-from .model_ids_shared_param import ModelIDsSharedParam as ModelIDsSharedParam
-from .message_tool_call_param import MessageToolCallParam as MessageToolCallParam
-from .web_search_context_size import WebSearchContextSize as WebSearchContextSize
-from .completion_create_params import CompletionCreateParams as CompletionCreateParams
-from .completion_list_response import CompletionListResponse as CompletionListResponse
-from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams
-from .web_search_location_param import WebSearchLocationParam as WebSearchLocationParam
-from .completion_delete_response import CompletionDeleteResponse as CompletionDeleteResponse
-from .response_format_text_param import ResponseFormatTextParam as ResponseFormatTextParam
-from .response_format_json_object import ResponseFormatJsonObject as ResponseFormatJsonObject
-from .response_format_json_schema import ResponseFormatJsonSchema as ResponseFormatJsonSchema
-from .completion_list_messages_params import CompletionListMessagesParams as CompletionListMessagesParams
-from .completion_list_messages_response import CompletionListMessagesResponse as CompletionListMessagesResponse
-from .response_format_json_object_param import ResponseFormatJsonObjectParam as ResponseFormatJsonObjectParam
-from .response_format_json_schema_param import ResponseFormatJsonSchemaParam as ResponseFormatJsonSchemaParam
-from .request_message_content_part_text_param import (
- RequestMessageContentPartTextParam as RequestMessageContentPartTextParam,
-)
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py b/src/digitalocean_genai_sdk/types/chat/completion_create_params.py
deleted file mode 100644
index d11f9322..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py
+++ /dev/null
@@ -1,662 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..reasoning_effort import ReasoningEffort
-from ..function_object_param import FunctionObjectParam
-from .model_ids_shared_param import ModelIDsSharedParam
-from ..voice_ids_shared_param import VoiceIDsSharedParam
-from .message_tool_call_param import MessageToolCallParam
-from .web_search_context_size import WebSearchContextSize
-from ..stop_configuration_param import StopConfigurationParam
-from .web_search_location_param import WebSearchLocationParam
-from .response_format_text_param import ResponseFormatTextParam
-from .response_format_json_object_param import ResponseFormatJsonObjectParam
-from .response_format_json_schema_param import ResponseFormatJsonSchemaParam
-from ..chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
-from .request_message_content_part_text_param import RequestMessageContentPartTextParam
-
-__all__ = [
- "CompletionCreateParams",
- "Message",
- "MessageChatCompletionRequestDeveloperMessage",
- "MessageChatCompletionRequestSystemMessage",
- "MessageChatCompletionRequestUserMessage",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPart",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile",
- "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile",
- "MessageChatCompletionRequestAssistantMessage",
- "MessageChatCompletionRequestAssistantMessageAudio",
- "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart",
- "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal",
- "MessageChatCompletionRequestAssistantMessageFunctionCall",
- "MessageChatCompletionRequestToolMessage",
- "MessageChatCompletionRequestFunctionMessage",
- "Audio",
- "FunctionCall",
- "FunctionCallChatCompletionFunctionCallOption",
- "Function",
- "Prediction",
- "ResponseFormat",
- "ToolChoice",
- "ToolChoiceChatCompletionNamedToolChoice",
- "ToolChoiceChatCompletionNamedToolChoiceFunction",
- "Tool",
- "WebSearchOptions",
- "WebSearchOptionsUserLocation",
-]
-
-
-class CompletionCreateParams(TypedDict, total=False):
- messages: Required[Iterable[Message]]
- """A list of messages comprising the conversation so far.
-
- Depending on the [model](/docs/models) you use, different message types
- (modalities) are supported, like [text](/docs/guides/text-generation),
- [images](/docs/guides/vision), and [audio](/docs/guides/audio).
- """
-
- model: Required[ModelIDsSharedParam]
- """Model ID used to generate the response, like `gpt-4o` or `o1`.
-
- OpenAI offers a wide range of models with different capabilities, performance
- characteristics, and price points. Refer to the [model guide](/docs/models) to
- browse and compare available models.
- """
-
- audio: Optional[Audio]
- """Parameters for audio output.
-
- Required when audio output is requested with `modalities: ["audio"]`.
- [Learn more](/docs/guides/audio).
- """
-
- frequency_penalty: Optional[float]
- """Number between -2.0 and 2.0.
-
- Positive values penalize new tokens based on their existing frequency in the
- text so far, decreasing the model's likelihood to repeat the same line verbatim.
- """
-
- function_call: FunctionCall
- """Deprecated in favor of `tool_choice`.
-
- Controls which (if any) function is called by the model.
-
- `none` means the model will not call a function and instead generates a message.
-
- `auto` means the model can pick between generating a message or calling a
- function.
-
- Specifying a particular function via `{"name": "my_function"}` forces the model
- to call that function.
-
- `none` is the default when no functions are present. `auto` is the default if
- functions are present.
- """
-
- functions: Iterable[Function]
- """Deprecated in favor of `tools`.
-
- A list of functions the model may generate JSON inputs for.
- """
-
- logit_bias: Optional[Dict[str, int]]
- """Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the
- tokenizer) to an associated bias value from -100 to 100. Mathematically, the
- bias is added to the logits generated by the model prior to sampling. The exact
- effect will vary per model, but values between -1 and 1 should decrease or
- increase likelihood of selection; values like -100 or 100 should result in a ban
- or exclusive selection of the relevant token.
- """
-
- logprobs: Optional[bool]
- """Whether to return log probabilities of the output tokens or not.
-
- If true, returns the log probabilities of each output token returned in the
- `content` of `message`.
- """
-
- max_completion_tokens: Optional[int]
- """
- An upper bound for the number of tokens that can be generated for a completion,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
- """
-
- max_tokens: Optional[int]
- """
- The maximum number of [tokens](/tokenizer) that can be generated in the chat
- completion. This value can be used to control
- [costs](https://openai.com/api/pricing/) for text generated via API.
-
- This value is now deprecated in favor of `max_completion_tokens`, and is not
- compatible with [o1 series models](/docs/guides/reasoning).
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- modalities: Optional[List[Literal["text", "audio"]]]
- """
- Output types that you would like the model to generate. Most models are capable
- of generating text, which is the default:
-
- `["text"]`
-
- The `gpt-4o-audio-preview` model can also be used to
- [generate audio](/docs/guides/audio). To request that this model generate both
- text and audio responses, you can use:
-
- `["text", "audio"]`
- """
-
- n: Optional[int]
- """How many chat completion choices to generate for each input message.
-
- Note that you will be charged based on the number of generated tokens across all
- of the choices. Keep `n` as `1` to minimize costs.
- """
-
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- prediction: Optional[Prediction]
- """
- Static predicted output content, such as the content of a text file that is
- being regenerated.
- """
-
- presence_penalty: Optional[float]
- """Number between -2.0 and 2.0.
-
- Positive values penalize new tokens based on whether they appear in the text so
- far, increasing the model's likelihood to talk about new topics.
- """
-
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: ResponseFormat
- """An object specifying the format that the model must output.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
- seed: Optional[int]
- """
- This feature is in Beta. If specified, our system will make a best effort to
- sample deterministically, such that repeated requests with the same `seed` and
- parameters should return the same result. Determinism is not guaranteed, and you
- should refer to the `system_fingerprint` response parameter to monitor changes
- in the backend.
- """
-
- service_tier: Optional[Literal["auto", "default"]]
- """Specifies the latency tier to use for processing the request.
-
- This parameter is relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
- - When not set, the default behavior is 'auto'.
-
- When this parameter is set, the response body will include the `service_tier`
- utilized.
- """
-
- stop: Optional[StopConfigurationParam]
- """Up to 4 sequences where the API will stop generating further tokens.
-
- The returned text will not contain the stop sequence.
- """
-
- store: Optional[bool]
- """
- Whether or not to store the output of this chat completion request for use in
- our [model distillation](/docs/guides/distillation) or
- [evals](/docs/guides/evals) products.
- """
-
- stream: Optional[bool]
- """
- If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/chat/streaming) for more
- information, along with the
- [streaming responses](/docs/guides/streaming-responses) guide for more
- information on how to handle the streaming events.
- """
-
- stream_options: Optional[ChatCompletionStreamOptionsParam]
- """Options for streaming response. Only set this when you set `stream: true`."""
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. We generally recommend altering
- this or `top_p` but not both.
- """
-
- tool_choice: ToolChoice
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tool and instead generates a message. `auto` means the model can
- pick between generating a message or calling one or more tools. `required` means
- the model must call one or more tools. Specifying a particular tool via
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
-
- `none` is the default when no tools are present. `auto` is the default if tools
- are present.
- """
-
- tools: Iterable[Tool]
- """A list of tools the model may call.
-
- Currently, only functions are supported as a tool. Use this to provide a list of
- functions the model may generate JSON inputs for. A max of 128 functions are
- supported.
- """
-
- top_logprobs: Optional[int]
- """
- An integer between 0 and 20 specifying the number of most likely tokens to
- return at each token position, each with an associated log probability.
- `logprobs` must be set to `true` if this parameter is used.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
-
- web_search_options: WebSearchOptions
- """
- This tool searches the web for relevant results to use in a response. Learn more
- about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
- """
-
-
-class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
- """The contents of the developer message."""
-
- role: Required[Literal["developer"]]
- """The role of the messages author, in this case `developer`."""
-
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
- """
-
-
-class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
- """The contents of the system message."""
-
- role: Required[Literal["system"]]
- """The role of the messages author, in this case `system`."""
-
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
- """
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL(
- TypedDict, total=False
-):
- url: Required[str]
- """Either a URL of the image or the base64 encoded image data."""
-
- detail: Literal["auto", "low", "high"]
- """Specifies the detail level of the image.
-
- Learn more in the
- [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
- """
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage(
- TypedDict, total=False
-):
- image_url: Required[
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL
- ]
-
- type: Required[Literal["image_url"]]
- """The type of the content part."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio(
- TypedDict, total=False
-):
- data: Required[str]
- """Base64 encoded audio data."""
-
- format: Required[Literal["wav", "mp3"]]
- """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio(
- TypedDict, total=False
-):
- input_audio: Required[
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio
- ]
-
- type: Required[Literal["input_audio"]]
- """The type of the content part. Always `input_audio`."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile(
- TypedDict, total=False
-):
- file_data: str
- """
- The base64 encoded file data, used when passing the file to the model as a
- string.
- """
-
- file_id: str
- """The ID of an uploaded file to use as input."""
-
- filename: str
- """The name of the file, used when passing the file to the model as a string."""
-
-
-class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile(
- TypedDict, total=False
-):
- file: Required[
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile
- ]
-
- type: Required[Literal["file"]]
- """The type of the content part. Always `file`."""
-
-
-MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[
- RequestMessageContentPartTextParam,
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage,
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio,
- MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile,
-]
-
-
-class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]]
- """The contents of the user message."""
-
- role: Required[Literal["user"]]
- """The role of the messages author, in this case `user`."""
-
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
- """
-
-
-class MessageChatCompletionRequestAssistantMessageAudio(TypedDict, total=False):
- id: Required[str]
- """Unique identifier for a previous audio response from the model."""
-
-
-class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal(
- TypedDict, total=False
-):
- refusal: Required[str]
- """The refusal message generated by the model."""
-
- type: Required[Literal["refusal"]]
- """The type of the content part."""
-
-
-MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[
- RequestMessageContentPartTextParam,
- MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal,
-]
-
-
-class MessageChatCompletionRequestAssistantMessageFunctionCall(TypedDict, total=False):
- arguments: Required[str]
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: Required[str]
- """The name of the function to call."""
-
-
-class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
- role: Required[Literal["assistant"]]
- """The role of the messages author, in this case `assistant`."""
-
- audio: Optional[MessageChatCompletionRequestAssistantMessageAudio]
- """Data about a previous audio response from the model.
-
- [Learn more](/docs/guides/audio).
- """
-
- content: Union[str, Iterable[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None]
- """The contents of the assistant message.
-
- Required unless `tool_calls` or `function_call` is specified.
- """
-
- function_call: Optional[MessageChatCompletionRequestAssistantMessageFunctionCall]
- """Deprecated and replaced by `tool_calls`.
-
- The name and arguments of a function that should be called, as generated by the
- model.
- """
-
- name: str
- """An optional name for the participant.
-
- Provides the model information to differentiate between participants of the same
- role.
- """
-
- refusal: Optional[str]
- """The refusal message by the assistant."""
-
- tool_calls: Iterable[MessageToolCallParam]
- """The tool calls generated by the model, such as function calls."""
-
-
-class MessageChatCompletionRequestToolMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
- """The contents of the tool message."""
-
- role: Required[Literal["tool"]]
- """The role of the messages author, in this case `tool`."""
-
- tool_call_id: Required[str]
- """Tool call that this message is responding to."""
-
-
-class MessageChatCompletionRequestFunctionMessage(TypedDict, total=False):
- content: Required[Optional[str]]
- """The contents of the function message."""
-
- name: Required[str]
- """The name of the function to call."""
-
- role: Required[Literal["function"]]
- """The role of the messages author, in this case `function`."""
-
-
-Message: TypeAlias = Union[
- MessageChatCompletionRequestDeveloperMessage,
- MessageChatCompletionRequestSystemMessage,
- MessageChatCompletionRequestUserMessage,
- MessageChatCompletionRequestAssistantMessage,
- MessageChatCompletionRequestToolMessage,
- MessageChatCompletionRequestFunctionMessage,
-]
-
-
-class Audio(TypedDict, total=False):
- format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
- """Specifies the output audio format.
-
- Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
- """
-
- voice: Required[VoiceIDsSharedParam]
- """The voice the model uses to respond.
-
- Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and
- `shimmer`.
- """
-
-
-class FunctionCallChatCompletionFunctionCallOption(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
-
-FunctionCall: TypeAlias = Union[Literal["none", "auto"], FunctionCallChatCompletionFunctionCallOption]
-
-
-class Function(TypedDict, total=False):
- name: Required[str]
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: str
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Dict[str, object]
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](/docs/guides/function-calling) for examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
-
-class Prediction(TypedDict, total=False):
- content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
- """
- The content that should be matched when generating a model response. If
- generated tokens would match this content, the entire model response can be
- returned much more quickly.
- """
-
- type: Required[Literal["content"]]
- """The type of the predicted content you want to provide.
-
- This type is currently always `content`.
- """
-
-
-ResponseFormat: TypeAlias = Union[ResponseFormatTextParam, ResponseFormatJsonSchemaParam, ResponseFormatJsonObjectParam]
-
-
-class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
-
-class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False):
- function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction]
-
- type: Required[Literal["function"]]
- """The type of the tool. Currently, only `function` is supported."""
-
-
-ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice]
-
-
-class Tool(TypedDict, total=False):
- function: Required[FunctionObjectParam]
-
- type: Required[Literal["function"]]
- """The type of the tool. Currently, only `function` is supported."""
-
-
-class WebSearchOptionsUserLocation(TypedDict, total=False):
- approximate: Required[WebSearchLocationParam]
- """Approximate location parameters for the search."""
-
- type: Required[Literal["approximate"]]
- """The type of location approximation. Always `approximate`."""
-
-
-class WebSearchOptions(TypedDict, total=False):
- search_context_size: WebSearchContextSize
- """
- High level guidance for the amount of context window space to use for the
- search. One of `low`, `medium`, or `high`. `medium` is the default.
- """
-
- user_location: Optional[WebSearchOptionsUserLocation]
- """Approximate location parameters for the search."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py b/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py
deleted file mode 100644
index 9e456e16..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["CompletionDeleteResponse"]
-
-
-class CompletionDeleteResponse(BaseModel):
- id: str
- """The ID of the chat completion that was deleted."""
-
- deleted: bool
- """Whether the chat completion was deleted."""
-
- object: Literal["chat.completion.deleted"]
- """The type of object being deleted."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py
deleted file mode 100644
index 43f4a7cc..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["CompletionListMessagesParams"]
-
-
-class CompletionListMessagesParams(TypedDict, total=False):
- after: str
- """Identifier for the last message from the previous pagination request."""
-
- limit: int
- """Number of messages to retrieve."""
-
- order: Literal["asc", "desc"]
- """Sort order for messages by timestamp.
-
- Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py
deleted file mode 100644
index 57087a63..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .response_message import ResponseMessage
-
-__all__ = ["CompletionListMessagesResponse", "Data"]
-
-
-class Data(ResponseMessage):
- id: str
- """The identifier of the chat message."""
-
-
-class CompletionListMessagesResponse(BaseModel):
- data: List[Data]
- """An array of chat completion message objects."""
-
- first_id: str
- """The identifier of the first chat message in the data array."""
-
- has_more: bool
- """Indicates whether there are more chat messages available."""
-
- last_id: str
- """The identifier of the last chat message in the data array."""
-
- object: Literal["list"]
- """The type of this object. It is always set to "list"."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_params.py
deleted file mode 100644
index 8f149e35..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["CompletionListParams"]
-
-
-class CompletionListParams(TypedDict, total=False):
- after: str
- """Identifier for the last chat completion from the previous pagination request."""
-
- limit: int
- """Number of Chat Completions to retrieve."""
-
- metadata: Optional[Dict[str, str]]
- """A list of metadata keys to filter the Chat Completions by. Example:
-
- `metadata[key1]=value1&metadata[key2]=value2`
- """
-
- model: str
- """The model used to generate the Chat Completions."""
-
- order: Literal["asc", "desc"]
- """Sort order for Chat Completions by timestamp.
-
- Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_response.py
deleted file mode 100644
index 2899f598..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_list_response.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .create_response import CreateResponse
-
-__all__ = ["CompletionListResponse"]
-
-
-class CompletionListResponse(BaseModel):
- data: List[CreateResponse]
- """An array of chat completion objects."""
-
- first_id: str
- """The identifier of the first chat completion in the data array."""
-
- has_more: bool
- """Indicates whether there are more Chat Completions available."""
-
- last_id: str
- """The identifier of the last chat completion in the data array."""
-
- object: Literal["list"]
- """The type of this object. It is always set to "list"."""
diff --git a/src/digitalocean_genai_sdk/types/chat/completion_update_params.py b/src/digitalocean_genai_sdk/types/chat/completion_update_params.py
deleted file mode 100644
index 1f09ecaa..00000000
--- a/src/digitalocean_genai_sdk/types/chat/completion_update_params.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["CompletionUpdateParams"]
-
-
-class CompletionUpdateParams(TypedDict, total=False):
- metadata: Required[Optional[Dict[str, str]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/create_response.py b/src/digitalocean_genai_sdk/types/chat/create_response.py
deleted file mode 100644
index a6320518..00000000
--- a/src/digitalocean_genai_sdk/types/chat/create_response.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .usage import Usage
-from ..._models import BaseModel
-from .token_logprob import TokenLogprob
-from .response_message import ResponseMessage
-
-__all__ = ["CreateResponse", "Choice", "ChoiceLogprobs"]
-
-
-class ChoiceLogprobs(BaseModel):
- content: Optional[List[TokenLogprob]] = None
- """A list of message content tokens with log probability information."""
-
- refusal: Optional[List[TokenLogprob]] = None
- """A list of message refusal tokens with log probability information."""
-
-
-class Choice(BaseModel):
- finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
- """The reason the model stopped generating tokens.
-
- This will be `stop` if the model hit a natural stop point or a provided stop
- sequence, `length` if the maximum number of tokens specified in the request was
- reached, `content_filter` if content was omitted due to a flag from our content
- filters, `tool_calls` if the model called a tool, or `function_call`
- (deprecated) if the model called a function.
- """
-
- index: int
- """The index of the choice in the list of choices."""
-
- logprobs: Optional[ChoiceLogprobs] = None
- """Log probability information for the choice."""
-
- message: ResponseMessage
- """A chat completion message generated by the model."""
-
-
-class CreateResponse(BaseModel):
- id: str
- """A unique identifier for the chat completion."""
-
- choices: List[Choice]
- """A list of chat completion choices.
-
- Can be more than one if `n` is greater than 1.
- """
-
- created: int
- """The Unix timestamp (in seconds) of when the chat completion was created."""
-
- model: str
- """The model used for the chat completion."""
-
- object: Literal["chat.completion"]
- """The object type, which is always `chat.completion`."""
-
- service_tier: Optional[Literal["scale", "default"]] = None
- """The service tier used for processing the request."""
-
- system_fingerprint: Optional[str] = None
- """This fingerprint represents the backend configuration that the model runs with.
-
- Can be used in conjunction with the `seed` request parameter to understand when
- backend changes have been made that might impact determinism.
- """
-
- usage: Optional[Usage] = None
- """Usage statistics for the completion request."""
diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call.py
deleted file mode 100644
index abc22e05..00000000
--- a/src/digitalocean_genai_sdk/types/chat/message_tool_call.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageToolCall", "Function"]
-
-
-class Function(BaseModel):
- arguments: str
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: str
- """The name of the function to call."""
-
-
-class MessageToolCall(BaseModel):
- id: str
- """The ID of the tool call."""
-
- function: Function
- """The function that the model called."""
-
- type: Literal["function"]
- """The type of the tool. Currently, only `function` is supported."""
diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py
deleted file mode 100644
index da60f69a..00000000
--- a/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["MessageToolCallParam", "Function"]
-
-
-class Function(TypedDict, total=False):
- arguments: Required[str]
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: Required[str]
- """The name of the function to call."""
-
-
-class MessageToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The ID of the tool call."""
-
- function: Required[Function]
- """The function that the model called."""
-
- type: Required[Literal["function"]]
- """The type of the tool. Currently, only `function` is supported."""
diff --git a/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py b/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py
deleted file mode 100644
index 497ba18c..00000000
--- a/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["ModelIDsSharedParam"]
-
-ModelIDsSharedParam: TypeAlias = Union[
- str,
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
-]
diff --git a/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py b/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py
deleted file mode 100644
index 8e83e40b..00000000
--- a/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["RequestMessageContentPartTextParam"]
-
-
-class RequestMessageContentPartTextParam(TypedDict, total=False):
- text: Required[str]
- """The text content."""
-
- type: Required[Literal["text"]]
- """The type of the content part."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py
deleted file mode 100644
index 17ca162a..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseFormatJsonObject"]
-
-
-class ResponseFormatJsonObject(BaseModel):
- type: Literal["json_object"]
- """The type of response format being defined. Always `json_object`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py
deleted file mode 100644
index 5296cec4..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ResponseFormatJsonObjectParam"]
-
-
-class ResponseFormatJsonObjectParam(TypedDict, total=False):
- type: Required[Literal["json_object"]]
- """The type of response format being defined. Always `json_object`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py
deleted file mode 100644
index a65bf052..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseFormatJsonSchema", "JsonSchema"]
-
-
-class JsonSchema(BaseModel):
- name: str
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: Optional[str] = None
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- strict: Optional[bool] = None
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-class ResponseFormatJsonSchema(BaseModel):
- json_schema: JsonSchema
- """Structured Outputs configuration options, including a JSON Schema."""
-
- type: Literal["json_schema"]
- """The type of response format being defined. Always `json_schema`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py
deleted file mode 100644
index 32d254c3..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ResponseFormatJsonSchemaParam", "JsonSchema"]
-
-
-class JsonSchema(TypedDict, total=False):
- name: Required[str]
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: str
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- schema: Dict[str, object]
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- strict: Optional[bool]
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-class ResponseFormatJsonSchemaParam(TypedDict, total=False):
- json_schema: Required[JsonSchema]
- """Structured Outputs configuration options, including a JSON Schema."""
-
- type: Required[Literal["json_schema"]]
- """The type of response format being defined. Always `json_schema`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text.py b/src/digitalocean_genai_sdk/types/chat/response_format_text.py
deleted file mode 100644
index f0c8cfb7..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_text.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseFormatText"]
-
-
-class ResponseFormatText(BaseModel):
- type: Literal["text"]
- """The type of response format being defined. Always `text`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py
deleted file mode 100644
index 0d37573e..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ResponseFormatTextParam"]
-
-
-class ResponseFormatTextParam(TypedDict, total=False):
- type: Required[Literal["text"]]
- """The type of response format being defined. Always `text`."""
diff --git a/src/digitalocean_genai_sdk/types/chat/response_message.py b/src/digitalocean_genai_sdk/types/chat/response_message.py
deleted file mode 100644
index 940adf8f..00000000
--- a/src/digitalocean_genai_sdk/types/chat/response_message.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .message_tool_call import MessageToolCall
-
-__all__ = ["ResponseMessage", "Annotation", "AnnotationURLCitation", "Audio", "FunctionCall"]
-
-
-class AnnotationURLCitation(BaseModel):
- end_index: int
- """The index of the last character of the URL citation in the message."""
-
- start_index: int
- """The index of the first character of the URL citation in the message."""
-
- title: str
- """The title of the web resource."""
-
- url: str
- """The URL of the web resource."""
-
-
-class Annotation(BaseModel):
- type: Literal["url_citation"]
- """The type of the URL citation. Always `url_citation`."""
-
- url_citation: AnnotationURLCitation
- """A URL citation when using web search."""
-
-
-class Audio(BaseModel):
- id: str
- """Unique identifier for this audio response."""
-
- data: str
- """
- Base64 encoded audio bytes generated by the model, in the format specified in
- the request.
- """
-
- expires_at: int
- """
- The Unix timestamp (in seconds) for when this audio response will no longer be
- accessible on the server for use in multi-turn conversations.
- """
-
- transcript: str
- """Transcript of the audio generated by the model."""
-
-
-class FunctionCall(BaseModel):
- arguments: str
- """
- The arguments to call the function with, as generated by the model in JSON
- format. Note that the model does not always generate valid JSON, and may
- hallucinate parameters not defined by your function schema. Validate the
- arguments in your code before calling your function.
- """
-
- name: str
- """The name of the function to call."""
-
-
-class ResponseMessage(BaseModel):
- content: Optional[str] = None
- """The contents of the message."""
-
- refusal: Optional[str] = None
- """The refusal message generated by the model."""
-
- role: Literal["assistant"]
- """The role of the author of this message."""
-
- annotations: Optional[List[Annotation]] = None
- """
- Annotations for the message, when applicable, as when using the
- [web search tool](/docs/guides/tools-web-search?api-mode=chat).
- """
-
- audio: Optional[Audio] = None
- """
- If the audio output modality is requested, this object contains data about the
- audio response from the model. [Learn more](/docs/guides/audio).
- """
-
- function_call: Optional[FunctionCall] = None
- """Deprecated and replaced by `tool_calls`.
-
- The name and arguments of a function that should be called, as generated by the
- model.
- """
-
- tool_calls: Optional[List[MessageToolCall]] = None
- """The tool calls generated by the model, such as function calls."""
diff --git a/src/digitalocean_genai_sdk/types/chat/token_logprob.py b/src/digitalocean_genai_sdk/types/chat/token_logprob.py
deleted file mode 100644
index d31943f6..00000000
--- a/src/digitalocean_genai_sdk/types/chat/token_logprob.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ..._models import BaseModel
-
-__all__ = ["TokenLogprob", "TopLogprob"]
-
-
-class TopLogprob(BaseModel):
- token: str
- """The token."""
-
- bytes: Optional[List[int]] = None
- """A list of integers representing the UTF-8 bytes representation of the token.
-
- Useful in instances where characters are represented by multiple tokens and
- their byte representations must be combined to generate the correct text
- representation. Can be `null` if there is no bytes representation for the token.
- """
-
- logprob: float
- """The log probability of this token, if it is within the top 20 most likely
- tokens.
-
- Otherwise, the value `-9999.0` is used to signify that the token is very
- unlikely.
- """
-
-
-class TokenLogprob(BaseModel):
- token: str
- """The token."""
-
- bytes: Optional[List[int]] = None
- """A list of integers representing the UTF-8 bytes representation of the token.
-
- Useful in instances where characters are represented by multiple tokens and
- their byte representations must be combined to generate the correct text
- representation. Can be `null` if there is no bytes representation for the token.
- """
-
- logprob: float
- """The log probability of this token, if it is within the top 20 most likely
- tokens.
-
- Otherwise, the value `-9999.0` is used to signify that the token is very
- unlikely.
- """
-
- top_logprobs: List[TopLogprob]
- """List of the most likely tokens and their log probability, at this token
- position.
-
- In rare cases, there may be fewer than the number of requested `top_logprobs`
- returned.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/usage.py b/src/digitalocean_genai_sdk/types/chat/usage.py
deleted file mode 100644
index 1a7a1abf..00000000
--- a/src/digitalocean_genai_sdk/types/chat/usage.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ..._models import BaseModel
-
-__all__ = ["Usage", "CompletionTokensDetails", "PromptTokensDetails"]
-
-
-class CompletionTokensDetails(BaseModel):
- accepted_prediction_tokens: Optional[int] = None
- """
- When using Predicted Outputs, the number of tokens in the prediction that
- appeared in the completion.
- """
-
- audio_tokens: Optional[int] = None
- """Audio input tokens generated by the model."""
-
- reasoning_tokens: Optional[int] = None
- """Tokens generated by the model for reasoning."""
-
- rejected_prediction_tokens: Optional[int] = None
- """
- When using Predicted Outputs, the number of tokens in the prediction that did
- not appear in the completion. However, like reasoning tokens, these tokens are
- still counted in the total completion tokens for purposes of billing, output,
- and context window limits.
- """
-
-
-class PromptTokensDetails(BaseModel):
- audio_tokens: Optional[int] = None
- """Audio input tokens present in the prompt."""
-
- cached_tokens: Optional[int] = None
- """Cached tokens present in the prompt."""
-
-
-class Usage(BaseModel):
- completion_tokens: int
- """Number of tokens in the generated completion."""
-
- prompt_tokens: int
- """Number of tokens in the prompt."""
-
- total_tokens: int
- """Total number of tokens used in the request (prompt + completion)."""
-
- completion_tokens_details: Optional[CompletionTokensDetails] = None
- """Breakdown of tokens used in a completion."""
-
- prompt_tokens_details: Optional[PromptTokensDetails] = None
- """Breakdown of tokens used in the prompt."""
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py b/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py
deleted file mode 100644
index 18b284a9..00000000
--- a/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["WebSearchContextSize"]
-
-WebSearchContextSize: TypeAlias = Literal["low", "medium", "high"]
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location.py b/src/digitalocean_genai_sdk/types/chat/web_search_location.py
deleted file mode 100644
index 192c4efa..00000000
--- a/src/digitalocean_genai_sdk/types/chat/web_search_location.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ..._models import BaseModel
-
-__all__ = ["WebSearchLocation"]
-
-
-class WebSearchLocation(BaseModel):
- city: Optional[str] = None
- """Free text input for the city of the user, e.g. `San Francisco`."""
-
- country: Optional[str] = None
- """
- The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
- the user, e.g. `US`.
- """
-
- region: Optional[str] = None
- """Free text input for the region of the user, e.g. `California`."""
-
- timezone: Optional[str] = None
- """
- The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
- user, e.g. `America/Los_Angeles`.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py b/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py
deleted file mode 100644
index bc4d5a4c..00000000
--- a/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["WebSearchLocationParam"]
-
-
-class WebSearchLocationParam(TypedDict, total=False):
- city: str
- """Free text input for the city of the user, e.g. `San Francisco`."""
-
- country: str
- """
- The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
- the user, e.g. `US`.
- """
-
- region: str
- """Free text input for the region of the user, e.g. `California`."""
-
- timezone: str
- """
- The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
- user, e.g. `America/Los_Angeles`.
- """
diff --git a/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py b/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py
deleted file mode 100644
index 471e0eba..00000000
--- a/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["ChatCompletionStreamOptionsParam"]
-
-
-class ChatCompletionStreamOptionsParam(TypedDict, total=False):
- include_usage: bool
- """If set, an additional chunk will be streamed before the `data: [DONE]` message.
-
- The `usage` field on this chunk shows the token usage statistics for the entire
- request, and the `choices` field will always be an empty array.
-
- All other chunks will also include a `usage` field, but with a null value.
- **NOTE:** If the stream is interrupted, you may not receive the final usage
- chunk which contains the total token usage for the request.
- """
diff --git a/src/digitalocean_genai_sdk/types/comparison_filter.py b/src/digitalocean_genai_sdk/types/comparison_filter.py
deleted file mode 100644
index 547aac28..00000000
--- a/src/digitalocean_genai_sdk/types/comparison_filter.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ComparisonFilter"]
-
-
-class ComparisonFilter(BaseModel):
- key: str
- """The key to compare against the value."""
-
- type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
- """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
-
- - `eq`: equals
- - `ne`: not equal
- - `gt`: greater than
- - `gte`: greater than or equal
- - `lt`: less than
- - `lte`: less than or equal
- """
-
- value: Union[str, float, bool]
- """
- The value to compare against the attribute key; supports string, number, or
- boolean types.
- """
diff --git a/src/digitalocean_genai_sdk/types/comparison_filter_param.py b/src/digitalocean_genai_sdk/types/comparison_filter_param.py
deleted file mode 100644
index 2df2d744..00000000
--- a/src/digitalocean_genai_sdk/types/comparison_filter_param.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ComparisonFilterParam"]
-
-
-class ComparisonFilterParam(TypedDict, total=False):
- key: Required[str]
- """The key to compare against the value."""
-
- type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
- """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
-
- - `eq`: equals
- - `ne`: not equal
- - `gt`: greater than
- - `gte`: greater than or equal
- - `lt`: less than
- - `lte`: less than or equal
- """
-
- value: Required[Union[str, float, bool]]
- """
- The value to compare against the attribute key; supports string, number, or
- boolean types.
- """
diff --git a/src/digitalocean_genai_sdk/types/completion_create_params.py b/src/digitalocean_genai_sdk/types/completion_create_params.py
deleted file mode 100644
index 36709c57..00000000
--- a/src/digitalocean_genai_sdk/types/completion_create_params.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .stop_configuration_param import StopConfigurationParam
-from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
-
-__all__ = ["CompletionCreateParams"]
-
-
-class CompletionCreateParams(TypedDict, total=False):
- model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]]
- """
- The prompt(s) to generate completions for, encoded as a string, array of
- strings, array of tokens, or array of token arrays.
-
- Note that <|endoftext|> is the document separator that the model sees during
- training, so if a prompt is not specified the model will generate as if from the
- beginning of a new document.
- """
-
- best_of: Optional[int]
- """
- Generates `best_of` completions server-side and returns the "best" (the one with
- the highest log probability per token). Results cannot be streamed.
-
- When used with `n`, `best_of` controls the number of candidate completions and
- `n` specifies how many to return – `best_of` must be greater than `n`.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
- """
-
- echo: Optional[bool]
- """Echo back the prompt in addition to the completion"""
-
- frequency_penalty: Optional[float]
- """Number between -2.0 and 2.0.
-
- Positive values penalize new tokens based on their existing frequency in the
- text so far, decreasing the model's likelihood to repeat the same line verbatim.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
- """
-
- logit_bias: Optional[Dict[str, int]]
- """Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- Mathematically, the bias is added to the logits generated by the model prior to
- sampling. The exact effect will vary per model, but values between -1 and 1
- should decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- from being generated.
- """
-
- logprobs: Optional[int]
- """
- Include the log probabilities on the `logprobs` most likely output tokens, as
- well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- list of the 5 most likely tokens. The API will always return the `logprob` of
- the sampled token, so there may be up to `logprobs+1` elements in the response.
-
- The maximum value for `logprobs` is 5.
- """
-
- max_tokens: Optional[int]
- """
- The maximum number of [tokens](/tokenizer) that can be generated in the
- completion.
-
- The token count of your prompt plus `max_tokens` cannot exceed the model's
- context length.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens.
- """
-
- n: Optional[int]
- """How many completions to generate for each prompt.
-
- **Note:** Because this parameter generates many completions, it can quickly
- consume your token quota. Use carefully and ensure that you have reasonable
- settings for `max_tokens` and `stop`.
- """
-
- presence_penalty: Optional[float]
- """Number between -2.0 and 2.0.
-
- Positive values penalize new tokens based on whether they appear in the text so
- far, increasing the model's likelihood to talk about new topics.
-
- [See more information about frequency and presence penalties.](/docs/guides/text-generation)
- """
-
- seed: Optional[int]
- """
- If specified, our system will make a best effort to sample deterministically,
- such that repeated requests with the same `seed` and parameters should return
- the same result.
-
- Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- response parameter to monitor changes in the backend.
- """
-
- stop: Optional[StopConfigurationParam]
- """Up to 4 sequences where the API will stop generating further tokens.
-
- The returned text will not contain the stop sequence.
- """
-
- stream: Optional[bool]
- """Whether to stream back partial progress.
-
- If set, tokens will be sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]`
- message.
- [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
- """
-
- stream_options: Optional[ChatCompletionStreamOptionsParam]
- """Options for streaming response. Only set this when you set `stream: true`."""
-
- suffix: Optional[str]
- """The suffix that comes after a completion of inserted text.
-
- This parameter is only supported for `gpt-3.5-turbo-instruct`.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/completion_create_response.py b/src/digitalocean_genai_sdk/types/completion_create_response.py
deleted file mode 100644
index 2e1028bf..00000000
--- a/src/digitalocean_genai_sdk/types/completion_create_response.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .chat.usage import Usage
-
-__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs"]
-
-
-class ChoiceLogprobs(BaseModel):
- text_offset: Optional[List[int]] = None
-
- token_logprobs: Optional[List[float]] = None
-
- tokens: Optional[List[str]] = None
-
- top_logprobs: Optional[List[Dict[str, float]]] = None
-
-
-class Choice(BaseModel):
- finish_reason: Literal["stop", "length", "content_filter"]
- """The reason the model stopped generating tokens.
-
- This will be `stop` if the model hit a natural stop point or a provided stop
- sequence, `length` if the maximum number of tokens specified in the request was
- reached, or `content_filter` if content was omitted due to a flag from our
- content filters.
- """
-
- index: int
-
- logprobs: Optional[ChoiceLogprobs] = None
-
- text: str
-
-
-class CompletionCreateResponse(BaseModel):
- id: str
- """A unique identifier for the completion."""
-
- choices: List[Choice]
- """The list of completion choices the model generated for the input prompt."""
-
- created: int
- """The Unix timestamp (in seconds) of when the completion was created."""
-
- model: str
- """The model used for completion."""
-
- object: Literal["text_completion"]
- """The object type, which is always "text_completion" """
-
- system_fingerprint: Optional[str] = None
- """This fingerprint represents the backend configuration that the model runs with.
-
- Can be used in conjunction with the `seed` request parameter to understand when
- backend changes have been made that might impact determinism.
- """
-
- usage: Optional[Usage] = None
- """Usage statistics for the completion request."""
diff --git a/src/digitalocean_genai_sdk/types/compound_filter.py b/src/digitalocean_genai_sdk/types/compound_filter.py
deleted file mode 100644
index bf1f793f..00000000
--- a/src/digitalocean_genai_sdk/types/compound_filter.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-from .comparison_filter import ComparisonFilter
-
-__all__ = ["CompoundFilter", "Filter"]
-
-Filter: TypeAlias = Union[ComparisonFilter, Dict[str, object]]
-
-
-class CompoundFilter(BaseModel):
- filters: List[Filter]
- """Array of filters to combine.
-
- Items can be `ComparisonFilter` or `CompoundFilter`.
- """
-
- type: Literal["and", "or"]
- """Type of operation: `and` or `or`."""
diff --git a/src/digitalocean_genai_sdk/types/compound_filter_param.py b/src/digitalocean_genai_sdk/types/compound_filter_param.py
deleted file mode 100644
index 1f66a965..00000000
--- a/src/digitalocean_genai_sdk/types/compound_filter_param.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .comparison_filter_param import ComparisonFilterParam
-
-__all__ = ["CompoundFilterParam", "Filter"]
-
-Filter: TypeAlias = Union[ComparisonFilterParam, Dict[str, object]]
-
-
-class CompoundFilterParam(TypedDict, total=False):
- filters: Required[Iterable[Filter]]
- """Array of filters to combine.
-
- Items can be `ComparisonFilter` or `CompoundFilter`.
- """
-
- type: Required[Literal["and", "or"]]
- """Type of operation: `and` or `or`."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call.py b/src/digitalocean_genai_sdk/types/computer_tool_call.py
deleted file mode 100644
index b127e694..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck
-
-__all__ = [
- "ComputerToolCall",
- "Action",
- "ActionClick",
- "ActionDoubleClick",
- "ActionDrag",
- "ActionDragPath",
- "ActionKeyPress",
- "ActionMove",
- "ActionScreenshot",
- "ActionScroll",
- "ActionType",
- "ActionWait",
-]
-
-
-class ActionClick(BaseModel):
- button: Literal["left", "right", "wheel", "back", "forward"]
- """Indicates which mouse button was pressed during the click.
-
- One of `left`, `right`, `wheel`, `back`, or `forward`.
- """
-
- type: Literal["click"]
- """Specifies the event type.
-
- For a click action, this property is always set to `click`.
- """
-
- x: int
- """The x-coordinate where the click occurred."""
-
- y: int
- """The y-coordinate where the click occurred."""
-
-
-class ActionDoubleClick(BaseModel):
- type: Literal["double_click"]
- """Specifies the event type.
-
- For a double click action, this property is always set to `double_click`.
- """
-
- x: int
- """The x-coordinate where the double click occurred."""
-
- y: int
- """The y-coordinate where the double click occurred."""
-
-
-class ActionDragPath(BaseModel):
- x: int
- """The x-coordinate."""
-
- y: int
- """The y-coordinate."""
-
-
-class ActionDrag(BaseModel):
- path: List[ActionDragPath]
- """An array of coordinates representing the path of the drag action.
-
- Coordinates will appear as an array of objects, eg
-
- ```
- [
-
- { x: 100, y: 200 },
- { x: 200, y: 300 }
- ]
- ```
- """
-
- type: Literal["drag"]
- """Specifies the event type.
-
- For a drag action, this property is always set to `drag`.
- """
-
-
-class ActionKeyPress(BaseModel):
- keys: List[str]
- """The combination of keys the model is requesting to be pressed.
-
- This is an array of strings, each representing a key.
- """
-
- type: Literal["keypress"]
- """Specifies the event type.
-
- For a keypress action, this property is always set to `keypress`.
- """
-
-
-class ActionMove(BaseModel):
- type: Literal["move"]
- """Specifies the event type.
-
- For a move action, this property is always set to `move`.
- """
-
- x: int
- """The x-coordinate to move to."""
-
- y: int
- """The y-coordinate to move to."""
-
-
-class ActionScreenshot(BaseModel):
- type: Literal["screenshot"]
- """Specifies the event type.
-
- For a screenshot action, this property is always set to `screenshot`.
- """
-
-
-class ActionScroll(BaseModel):
- scroll_x: int
- """The horizontal scroll distance."""
-
- scroll_y: int
- """The vertical scroll distance."""
-
- type: Literal["scroll"]
- """Specifies the event type.
-
- For a scroll action, this property is always set to `scroll`.
- """
-
- x: int
- """The x-coordinate where the scroll occurred."""
-
- y: int
- """The y-coordinate where the scroll occurred."""
-
-
-class ActionType(BaseModel):
- text: str
- """The text to type."""
-
- type: Literal["type"]
- """Specifies the event type.
-
- For a type action, this property is always set to `type`.
- """
-
-
-class ActionWait(BaseModel):
- type: Literal["wait"]
- """Specifies the event type.
-
- For a wait action, this property is always set to `wait`.
- """
-
-
-Action: TypeAlias = Union[
- ActionClick,
- ActionDoubleClick,
- ActionDrag,
- ActionKeyPress,
- ActionMove,
- ActionScreenshot,
- ActionScroll,
- ActionType,
- ActionWait,
-]
-
-
-class ComputerToolCall(BaseModel):
- id: str
- """The unique ID of the computer call."""
-
- action: Action
- """A click action."""
-
- call_id: str
- """An identifier used when responding to the tool call with output."""
-
- pending_safety_checks: List[ComputerToolCallSafetyCheck]
- """The pending safety checks for the computer call."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Literal["computer_call"]
- """The type of the computer call. Always `computer_call`."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output.py
deleted file mode 100644
index 0133a29a..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_output.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck
-
-__all__ = ["ComputerToolCallOutput", "Output"]
-
-
-class Output(BaseModel):
- type: Literal["computer_screenshot"]
- """Specifies the event type.
-
- For a computer screenshot, this property is always set to `computer_screenshot`.
- """
-
- file_id: Optional[str] = None
- """The identifier of an uploaded file that contains the screenshot."""
-
- image_url: Optional[str] = None
- """The URL of the screenshot image."""
-
-
-class ComputerToolCallOutput(BaseModel):
- call_id: str
- """The ID of the computer tool call that produced the output."""
-
- output: Output
- """A computer screenshot image used with the computer use tool."""
-
- type: Literal["computer_call_output"]
- """The type of the computer tool call output. Always `computer_call_output`."""
-
- id: Optional[str] = None
- """The ID of the computer tool call output."""
-
- acknowledged_safety_checks: Optional[List[ComputerToolCallSafetyCheck]] = None
- """
- The safety checks reported by the API that have been acknowledged by the
- developer.
- """
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py
deleted file mode 100644
index 764c4da8..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam
-
-__all__ = ["ComputerToolCallOutputParam", "Output"]
-
-
-class Output(TypedDict, total=False):
- type: Required[Literal["computer_screenshot"]]
- """Specifies the event type.
-
- For a computer screenshot, this property is always set to `computer_screenshot`.
- """
-
- file_id: str
- """The identifier of an uploaded file that contains the screenshot."""
-
- image_url: str
- """The URL of the screenshot image."""
-
-
-class ComputerToolCallOutputParam(TypedDict, total=False):
- call_id: Required[str]
- """The ID of the computer tool call that produced the output."""
-
- output: Required[Output]
- """A computer screenshot image used with the computer use tool."""
-
- type: Required[Literal["computer_call_output"]]
- """The type of the computer tool call output. Always `computer_call_output`."""
-
- id: str
- """The ID of the computer tool call output."""
-
- acknowledged_safety_checks: Iterable[ComputerToolCallSafetyCheckParam]
- """
- The safety checks reported by the API that have been acknowledged by the
- developer.
- """
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_param.py
deleted file mode 100644
index 7fb87bfa..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_param.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam
-
-__all__ = [
- "ComputerToolCallParam",
- "Action",
- "ActionClick",
- "ActionDoubleClick",
- "ActionDrag",
- "ActionDragPath",
- "ActionKeyPress",
- "ActionMove",
- "ActionScreenshot",
- "ActionScroll",
- "ActionType",
- "ActionWait",
-]
-
-
-class ActionClick(TypedDict, total=False):
- button: Required[Literal["left", "right", "wheel", "back", "forward"]]
- """Indicates which mouse button was pressed during the click.
-
- One of `left`, `right`, `wheel`, `back`, or `forward`.
- """
-
- type: Required[Literal["click"]]
- """Specifies the event type.
-
- For a click action, this property is always set to `click`.
- """
-
- x: Required[int]
- """The x-coordinate where the click occurred."""
-
- y: Required[int]
- """The y-coordinate where the click occurred."""
-
-
-class ActionDoubleClick(TypedDict, total=False):
- type: Required[Literal["double_click"]]
- """Specifies the event type.
-
- For a double click action, this property is always set to `double_click`.
- """
-
- x: Required[int]
- """The x-coordinate where the double click occurred."""
-
- y: Required[int]
- """The y-coordinate where the double click occurred."""
-
-
-class ActionDragPath(TypedDict, total=False):
- x: Required[int]
- """The x-coordinate."""
-
- y: Required[int]
- """The y-coordinate."""
-
-
-class ActionDrag(TypedDict, total=False):
- path: Required[Iterable[ActionDragPath]]
- """An array of coordinates representing the path of the drag action.
-
- Coordinates will appear as an array of objects, eg
-
- ```
- [
-
- { x: 100, y: 200 },
- { x: 200, y: 300 }
- ]
- ```
- """
-
- type: Required[Literal["drag"]]
- """Specifies the event type.
-
- For a drag action, this property is always set to `drag`.
- """
-
-
-class ActionKeyPress(TypedDict, total=False):
- keys: Required[List[str]]
- """The combination of keys the model is requesting to be pressed.
-
- This is an array of strings, each representing a key.
- """
-
- type: Required[Literal["keypress"]]
- """Specifies the event type.
-
- For a keypress action, this property is always set to `keypress`.
- """
-
-
-class ActionMove(TypedDict, total=False):
- type: Required[Literal["move"]]
- """Specifies the event type.
-
- For a move action, this property is always set to `move`.
- """
-
- x: Required[int]
- """The x-coordinate to move to."""
-
- y: Required[int]
- """The y-coordinate to move to."""
-
-
-class ActionScreenshot(TypedDict, total=False):
- type: Required[Literal["screenshot"]]
- """Specifies the event type.
-
- For a screenshot action, this property is always set to `screenshot`.
- """
-
-
-class ActionScroll(TypedDict, total=False):
- scroll_x: Required[int]
- """The horizontal scroll distance."""
-
- scroll_y: Required[int]
- """The vertical scroll distance."""
-
- type: Required[Literal["scroll"]]
- """Specifies the event type.
-
- For a scroll action, this property is always set to `scroll`.
- """
-
- x: Required[int]
- """The x-coordinate where the scroll occurred."""
-
- y: Required[int]
- """The y-coordinate where the scroll occurred."""
-
-
-class ActionType(TypedDict, total=False):
- text: Required[str]
- """The text to type."""
-
- type: Required[Literal["type"]]
- """Specifies the event type.
-
- For a type action, this property is always set to `type`.
- """
-
-
-class ActionWait(TypedDict, total=False):
- type: Required[Literal["wait"]]
- """Specifies the event type.
-
- For a wait action, this property is always set to `wait`.
- """
-
-
-Action: TypeAlias = Union[
- ActionClick,
- ActionDoubleClick,
- ActionDrag,
- ActionKeyPress,
- ActionMove,
- ActionScreenshot,
- ActionScroll,
- ActionType,
- ActionWait,
-]
-
-
-class ComputerToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the computer call."""
-
- action: Required[Action]
- """A click action."""
-
- call_id: Required[str]
- """An identifier used when responding to the tool call with output."""
-
- pending_safety_checks: Required[Iterable[ComputerToolCallSafetyCheckParam]]
- """The pending safety checks for the computer call."""
-
- status: Required[Literal["in_progress", "completed", "incomplete"]]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Required[Literal["computer_call"]]
- """The type of the computer call. Always `computer_call`."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py
deleted file mode 100644
index e24b9f35..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["ComputerToolCallSafetyCheck"]
-
-
-class ComputerToolCallSafetyCheck(BaseModel):
- id: str
- """The ID of the pending safety check."""
-
- code: str
- """The type of the pending safety check."""
-
- message: str
- """Details about the pending safety check."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py
deleted file mode 100644
index 859d6b59..00000000
--- a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ComputerToolCallSafetyCheckParam"]
-
-
-class ComputerToolCallSafetyCheckParam(TypedDict, total=False):
- id: Required[str]
- """The ID of the pending safety check."""
-
- code: Required[str]
- """The type of the pending safety check."""
-
- message: Required[str]
- """Details about the pending safety check."""
diff --git a/src/digitalocean_genai_sdk/types/create_thread_request_param.py b/src/digitalocean_genai_sdk/types/create_thread_request_param.py
deleted file mode 100644
index 3a8f59b4..00000000
--- a/src/digitalocean_genai_sdk/types/create_thread_request_param.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .threads.create_message_request_param import CreateMessageRequestParam
-
-__all__ = [
- "CreateThreadRequestParam",
- "ToolResources",
- "ToolResourcesCodeInterpreter",
- "ToolResourcesFileSearch",
- "ToolResourcesFileSearchVectorStore",
- "ToolResourcesFileSearchVectorStoreChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic",
-]
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False):
- static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic]
-
- type: Required[Literal["static"]]
- """Always `static`."""
-
-
-ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
- ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy,
- ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy,
-]
-
-
-class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
- chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
-
- file_ids: List[str]
- """A list of [file](/docs/api-reference/files) IDs to add to the vector store.
-
- There can be a maximum of 10000 files in a vector store.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
- vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
- """
- A helper to create a [vector store](/docs/api-reference/vector-stores/object)
- with file_ids and attach it to this thread. There can be a maximum of 1 vector
- store attached to the thread.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-class CreateThreadRequestParam(TypedDict, total=False):
- messages: Iterable[CreateMessageRequestParam]
- """A list of [messages](/docs/api-reference/messages) to start the thread with."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- tool_resources: Optional[ToolResources]
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
diff --git a/src/digitalocean_genai_sdk/types/embedding_create_params.py b/src/digitalocean_genai_sdk/types/embedding_create_params.py
deleted file mode 100644
index caf65415..00000000
--- a/src/digitalocean_genai_sdk/types/embedding_create_params.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["EmbeddingCreateParams"]
-
-
-class EmbeddingCreateParams(TypedDict, total=False):
- input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]]
- """Input text to embed, encoded as a string or array of tokens.
-
- To embed multiple inputs in a single request, pass an array of strings or array
- of token arrays. The input must not exceed the max input tokens for the model
- (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any
- array must be 2048 dimensions or less.
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- for counting tokens. Some models may also impose a limit on total number of
- tokens summed across inputs.
- """
-
- model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]]
- """ID of the model to use.
-
- You can use the [List models](/docs/api-reference/models/list) API to see all of
- your available models, or see our [Model overview](/docs/models) for
- descriptions of them.
- """
-
- dimensions: int
- """The number of dimensions the resulting output embeddings should have.
-
- Only supported in `text-embedding-3` and later models.
- """
-
- encoding_format: Literal["float", "base64"]
- """The format to return the embeddings in.
-
- Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/embedding_create_response.py b/src/digitalocean_genai_sdk/types/embedding_create_response.py
deleted file mode 100644
index e85daaba..00000000
--- a/src/digitalocean_genai_sdk/types/embedding_create_response.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["EmbeddingCreateResponse", "Data", "Usage"]
-
-
-class Data(BaseModel):
- embedding: List[float]
- """The embedding vector, which is a list of floats.
-
- The length of vector depends on the model as listed in the
- [embedding guide](/docs/guides/embeddings).
- """
-
- index: int
- """The index of the embedding in the list of embeddings."""
-
- object: Literal["embedding"]
- """The object type, which is always "embedding"."""
-
-
-class Usage(BaseModel):
- prompt_tokens: int
- """The number of tokens used by the prompt."""
-
- total_tokens: int
- """The total number of tokens used by the request."""
-
-
-class EmbeddingCreateResponse(BaseModel):
- data: List[Data]
- """The list of embeddings generated by the model."""
-
- model: str
- """The name of the model used to generate the embedding."""
-
- object: Literal["list"]
- """The object type, which is always "list"."""
-
- usage: Usage
- """The usage information for the request."""
diff --git a/src/digitalocean_genai_sdk/types/file_delete_response.py b/src/digitalocean_genai_sdk/types/file_delete_response.py
deleted file mode 100644
index 26e2e053..00000000
--- a/src/digitalocean_genai_sdk/types/file_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FileDeleteResponse"]
-
-
-class FileDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["file"]
diff --git a/src/digitalocean_genai_sdk/types/file_list_params.py b/src/digitalocean_genai_sdk/types/file_list_params.py
deleted file mode 100644
index 058d874c..00000000
--- a/src/digitalocean_genai_sdk/types/file_list_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["FileListParams"]
-
-
-class FileListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 10,000, and the default is 10,000.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
-
- purpose: str
- """Only return files with the given purpose."""
diff --git a/src/digitalocean_genai_sdk/types/file_list_response.py b/src/digitalocean_genai_sdk/types/file_list_response.py
deleted file mode 100644
index db9ef641..00000000
--- a/src/digitalocean_genai_sdk/types/file_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .openai_file import OpenAIFile
-
-__all__ = ["FileListResponse"]
-
-
-class FileListResponse(BaseModel):
- data: List[OpenAIFile]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py
deleted file mode 100644
index 20c945db..00000000
--- a/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import TypeAlias
-
-__all__ = ["FileRetrieveContentResponse"]
-
-FileRetrieveContentResponse: TypeAlias = str
diff --git a/src/digitalocean_genai_sdk/types/file_search_ranker.py b/src/digitalocean_genai_sdk/types/file_search_ranker.py
deleted file mode 100644
index d4aabe5a..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_ranker.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["FileSearchRanker"]
-
-FileSearchRanker: TypeAlias = Literal["auto", "default_2024_08_21"]
diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call.py b/src/digitalocean_genai_sdk/types/file_search_tool_call.py
deleted file mode 100644
index 04542379..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_tool_call.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FileSearchToolCall", "Result"]
-
-
-class Result(BaseModel):
- attributes: Optional[Dict[str, Union[str, float, bool]]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- file_id: Optional[str] = None
- """The unique ID of the file."""
-
- filename: Optional[str] = None
- """The name of the file."""
-
- score: Optional[float] = None
- """The relevance score of the file - a value between 0 and 1."""
-
- text: Optional[str] = None
- """The text that was retrieved from the file."""
-
-
-class FileSearchToolCall(BaseModel):
- id: str
- """The unique ID of the file search tool call."""
-
- queries: List[str]
- """The queries used to search for files."""
-
- status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
- """The status of the file search tool call.
-
- One of `in_progress`, `searching`, `incomplete` or `failed`,
- """
-
- type: Literal["file_search_call"]
- """The type of the file search tool call. Always `file_search_call`."""
-
- results: Optional[List[Result]] = None
- """The results of the file search tool call."""
diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
deleted file mode 100644
index 315dc90e..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FileSearchToolCallParam", "Result"]
-
-
-class Result(TypedDict, total=False):
- attributes: Optional[Dict[str, Union[str, float, bool]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- file_id: str
- """The unique ID of the file."""
-
- filename: str
- """The name of the file."""
-
- score: float
- """The relevance score of the file - a value between 0 and 1."""
-
- text: str
- """The text that was retrieved from the file."""
-
-
-class FileSearchToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the file search tool call."""
-
- queries: Required[List[str]]
- """The queries used to search for files."""
-
- status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
- """The status of the file search tool call.
-
- One of `in_progress`, `searching`, `incomplete` or `failed`,
- """
-
- type: Required[Literal["file_search_call"]]
- """The type of the file search tool call. Always `file_search_call`."""
-
- results: Optional[Iterable[Result]]
- """The results of the file search tool call."""
diff --git a/src/digitalocean_genai_sdk/types/file_upload_params.py b/src/digitalocean_genai_sdk/types/file_upload_params.py
deleted file mode 100644
index 5b42fc50..00000000
--- a/src/digitalocean_genai_sdk/types/file_upload_params.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["FileUploadParams"]
-
-
-class FileUploadParams(TypedDict, total=False):
- file: Required[FileTypes]
- """The File object (not file name) to be uploaded."""
-
- purpose: Required[Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"]]
- """The intended purpose of the uploaded file.
-
- One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch
- API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision
- fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used
- for eval data sets
- """
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
deleted file mode 100644
index 6b7dcea7..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .fine_tuning_job import FineTuningJob as FineTuningJob
-from .job_list_params import JobListParams as JobListParams
-from .fine_tune_method import FineTuneMethod as FineTuneMethod
-from .job_create_params import JobCreateParams as JobCreateParams
-from .job_list_response import JobListResponse as JobListResponse
-from .fine_tune_method_param import FineTuneMethodParam as FineTuneMethodParam
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
deleted file mode 100644
index 6b30e048..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .permission_create_params import PermissionCreateParams as PermissionCreateParams
-from .permission_delete_response import PermissionDeleteResponse as PermissionDeleteResponse
-from .permission_retrieve_params import PermissionRetrieveParams as PermissionRetrieveParams
-from .list_fine_tuning_checkpoint_permission import (
- ListFineTuningCheckpointPermission as ListFineTuningCheckpointPermission,
-)
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py
deleted file mode 100644
index 9136bf5d..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ListFineTuningCheckpointPermission", "Data"]
-
-
-class Data(BaseModel):
- id: str
- """The permission identifier, which can be referenced in the API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the permission was created."""
-
- object: Literal["checkpoint.permission"]
- """The object type, which is always "checkpoint.permission"."""
-
- project_id: str
- """The project identifier that the permission is for."""
-
-
-class ListFineTuningCheckpointPermission(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- object: Literal["list"]
-
- first_id: Optional[str] = None
-
- last_id: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py
deleted file mode 100644
index 92f98f21..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Required, TypedDict
-
-__all__ = ["PermissionCreateParams"]
-
-
-class PermissionCreateParams(TypedDict, total=False):
- project_ids: Required[List[str]]
- """The project identifiers to grant access to."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py
deleted file mode 100644
index 1a92d912..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["PermissionDeleteResponse"]
-
-
-class PermissionDeleteResponse(BaseModel):
- id: str
- """The ID of the fine-tuned model checkpoint permission that was deleted."""
-
- deleted: bool
- """Whether the fine-tuned model checkpoint permission was successfully deleted."""
-
- object: Literal["checkpoint.permission"]
- """The object type, which is always "checkpoint.permission"."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py
deleted file mode 100644
index 6e66a867..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["PermissionRetrieveParams"]
-
-
-class PermissionRetrieveParams(TypedDict, total=False):
- after: str
- """Identifier for the last permission ID from the previous pagination request."""
-
- limit: int
- """Number of permissions to retrieve."""
-
- order: Literal["ascending", "descending"]
- """The order in which to retrieve permissions."""
-
- project_id: str
- """The ID of the project to get permissions for."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py
deleted file mode 100644
index 6ad8f7a5..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FineTuneMethod", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"]
-
-
-class DpoHyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- beta: Union[Literal["auto"], float, None] = None
- """The beta value for the DPO method.
-
- A higher beta value will increase the weight of the penalty between the policy
- and reference model.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Dpo(BaseModel):
- hyperparameters: Optional[DpoHyperparameters] = None
- """The hyperparameters used for the fine-tuning job."""
-
-
-class SupervisedHyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Supervised(BaseModel):
- hyperparameters: Optional[SupervisedHyperparameters] = None
- """The hyperparameters used for the fine-tuning job."""
-
-
-class FineTuneMethod(BaseModel):
- dpo: Optional[Dpo] = None
- """Configuration for the DPO fine-tuning method."""
-
- supervised: Optional[Supervised] = None
- """Configuration for the supervised fine-tuning method."""
-
- type: Optional[Literal["supervised", "dpo"]] = None
- """The type of method. Is either `supervised` or `dpo`."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py
deleted file mode 100644
index e28abc93..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["FineTuneMethodParam", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"]
-
-
-class DpoHyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- beta: Union[Literal["auto"], float]
- """The beta value for the DPO method.
-
- A higher beta value will increase the weight of the penalty between the policy
- and reference model.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Dpo(TypedDict, total=False):
- hyperparameters: DpoHyperparameters
- """The hyperparameters used for the fine-tuning job."""
-
-
-class SupervisedHyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class Supervised(TypedDict, total=False):
- hyperparameters: SupervisedHyperparameters
- """The hyperparameters used for the fine-tuning job."""
-
-
-class FineTuneMethodParam(TypedDict, total=False):
- dpo: Dpo
- """Configuration for the DPO fine-tuning method."""
-
- supervised: Supervised
- """Configuration for the supervised fine-tuning method."""
-
- type: Literal["supervised", "dpo"]
- """The type of method. Is either `supervised` or `dpo`."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py
deleted file mode 100644
index 29f387a1..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .fine_tune_method import FineTuneMethod
-
-__all__ = ["FineTuningJob", "Error", "Hyperparameters", "Integration", "IntegrationWandb"]
-
-
-class Error(BaseModel):
- code: str
- """A machine-readable error code."""
-
- message: str
- """A human-readable error message."""
-
- param: Optional[str] = None
- """The parameter that was invalid, usually `training_file` or `validation_file`.
-
- This field will be null if the failure was not parameter-specific.
- """
-
-
-class Hyperparameters(BaseModel):
- batch_size: Union[Literal["auto"], int, None] = None
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float, None] = None
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int, None] = None
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class IntegrationWandb(BaseModel):
- project: str
- """The name of the project that the new run will be created under."""
-
- entity: Optional[str] = None
- """The entity to use for the run.
-
- This allows you to set the team or username of the WandB user that you would
- like associated with the run. If not set, the default entity for the registered
- WandB API key is used.
- """
-
- name: Optional[str] = None
- """A display name to set for the run.
-
- If not set, we will use the Job ID as the name.
- """
-
- tags: Optional[List[str]] = None
- """A list of tags to be attached to the newly created run.
-
- These tags are passed through directly to WandB. Some default tags are generated
- by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
- """
-
-
-class Integration(BaseModel):
- type: Literal["wandb"]
- """The type of the integration being enabled for the fine-tuning job"""
-
- wandb: IntegrationWandb
- """The settings for your integration with Weights and Biases.
-
- This payload specifies the project that metrics will be sent to. Optionally, you
- can set an explicit display name for your run, add tags to your run, and set a
- default entity (team, username, etc) to be associated with your run.
- """
-
-
-class FineTuningJob(BaseModel):
- id: str
- """The object identifier, which can be referenced in the API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
-
- error: Optional[Error] = None
- """
- For fine-tuning jobs that have `failed`, this will contain more information on
- the cause of the failure.
- """
-
- fine_tuned_model: Optional[str] = None
- """The name of the fine-tuned model that is being created.
-
- The value will be null if the fine-tuning job is still running.
- """
-
- finished_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the fine-tuning job was finished.
-
- The value will be null if the fine-tuning job is still running.
- """
-
- hyperparameters: Hyperparameters
- """The hyperparameters used for the fine-tuning job.
-
- This value will only be returned when running `supervised` jobs.
- """
-
- model: str
- """The base model that is being fine-tuned."""
-
- object: Literal["fine_tuning.job"]
- """The object type, which is always "fine_tuning.job"."""
-
- organization_id: str
- """The organization that owns the fine-tuning job."""
-
- result_files: List[str]
- """The compiled results file ID(s) for the fine-tuning job.
-
- You can retrieve the results with the
- [Files API](/docs/api-reference/files/retrieve-contents).
- """
-
- seed: int
- """The seed used for the fine-tuning job."""
-
- status: Literal["validating_files", "queued", "running", "succeeded", "failed", "cancelled"]
- """
- The current status of the fine-tuning job, which can be either
- `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
- """
-
- trained_tokens: Optional[int] = None
- """The total number of billable tokens processed by this fine-tuning job.
-
- The value will be null if the fine-tuning job is still running.
- """
-
- training_file: str
- """The file ID used for training.
-
- You can retrieve the training data with the
- [Files API](/docs/api-reference/files/retrieve-contents).
- """
-
- validation_file: Optional[str] = None
- """The file ID used for validation.
-
- You can retrieve the validation results with the
- [Files API](/docs/api-reference/files/retrieve-contents).
- """
-
- estimated_finish: Optional[int] = None
- """
- The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
- finish. The value will be null if the fine-tuning job is not running.
- """
-
- integrations: Optional[List[Integration]] = None
- """A list of integrations to enable for this fine-tuning job."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- method: Optional[FineTuneMethod] = None
- """The method used for fine-tuning."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py
deleted file mode 100644
index a538e659..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .fine_tune_method_param import FineTuneMethodParam
-
-__all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWandb"]
-
-
-class JobCreateParams(TypedDict, total=False):
- model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]]
- """The name of the model to fine-tune.
-
- You can select one of the
- [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
- """
-
- training_file: Required[str]
- """The ID of an uploaded file that contains training data.
-
- See [upload file](/docs/api-reference/files/create) for how to upload a file.
-
- Your dataset must be formatted as a JSONL file. Additionally, you must upload
- your file with the purpose `fine-tune`.
-
- The contents of the file should differ depending on if the model uses the
- [chat](/docs/api-reference/fine-tuning/chat-input),
- [completions](/docs/api-reference/fine-tuning/completions-input) format, or if
- the fine-tuning method uses the
- [preference](/docs/api-reference/fine-tuning/preference-input) format.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
- """
-
- hyperparameters: Hyperparameters
- """
- The hyperparameters used for the fine-tuning job. This value is now deprecated
- in favor of `method`, and should be passed in under the `method` parameter.
- """
-
- integrations: Optional[Iterable[Integration]]
- """A list of integrations to enable for your fine-tuning job."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- method: FineTuneMethodParam
- """The method used for fine-tuning."""
-
- seed: Optional[int]
- """The seed controls the reproducibility of the job.
-
- Passing in the same seed and job parameters should produce the same results, but
- may differ in rare cases. If a seed is not specified, one will be generated for
- you.
- """
-
- suffix: Optional[str]
- """
- A string of up to 64 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
- """
-
- validation_file: Optional[str]
- """The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the fine-tuning
- results file. The same data should not be present in both train and validation
- files.
-
- Your dataset must be formatted as a JSONL file. You must upload your file with
- the purpose `fine-tune`.
-
- See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
- """
-
-
-class Hyperparameters(TypedDict, total=False):
- batch_size: Union[Literal["auto"], int]
- """Number of examples in each batch.
-
- A larger batch size means that model parameters are updated less frequently, but
- with lower variance.
- """
-
- learning_rate_multiplier: Union[Literal["auto"], float]
- """Scaling factor for the learning rate.
-
- A smaller learning rate may be useful to avoid overfitting.
- """
-
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
-
-class IntegrationWandb(TypedDict, total=False):
- project: Required[str]
- """The name of the project that the new run will be created under."""
-
- entity: Optional[str]
- """The entity to use for the run.
-
- This allows you to set the team or username of the WandB user that you would
- like associated with the run. If not set, the default entity for the registered
- WandB API key is used.
- """
-
- name: Optional[str]
- """A display name to set for the run.
-
- If not set, we will use the Job ID as the name.
- """
-
- tags: List[str]
- """A list of tags to be attached to the newly created run.
-
- These tags are passed through directly to WandB. Some default tags are generated
- by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
- """
-
-
-class Integration(TypedDict, total=False):
- type: Required[Literal["wandb"]]
- """The type of integration to enable.
-
- Currently, only "wandb" (Weights and Biases) is supported.
- """
-
- wandb: Required[IntegrationWandb]
- """The settings for your integration with Weights and Biases.
-
- This payload specifies the project that metrics will be sent to. Optionally, you
- can set an explicit display name for your run, add tags to your run, and set a
- default entity (team, username, etc) to be associated with your run.
- """
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py
deleted file mode 100644
index b79f3ce8..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import TypedDict
-
-__all__ = ["JobListParams"]
-
-
-class JobListParams(TypedDict, total=False):
- after: str
- """Identifier for the last job from the previous pagination request."""
-
- limit: int
- """Number of fine-tuning jobs to retrieve."""
-
- metadata: Optional[Dict[str, str]]
- """Optional metadata filter.
-
- To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to
- indicate no metadata.
- """
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py
deleted file mode 100644
index ea6eb6a8..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .fine_tuning_job import FineTuningJob
-
-__all__ = ["JobListResponse"]
-
-
-class JobListResponse(BaseModel):
- data: List[FineTuningJob]
-
- has_more: bool
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py
deleted file mode 100644
index 9ba11022..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .event_retrieve_params import EventRetrieveParams as EventRetrieveParams
-from .event_retrieve_response import EventRetrieveResponse as EventRetrieveResponse
-from .checkpoint_retrieve_params import CheckpointRetrieveParams as CheckpointRetrieveParams
-from .checkpoint_retrieve_response import CheckpointRetrieveResponse as CheckpointRetrieveResponse
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py
deleted file mode 100644
index 34666a9f..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["CheckpointRetrieveParams"]
-
-
-class CheckpointRetrieveParams(TypedDict, total=False):
- after: str
- """Identifier for the last checkpoint ID from the previous pagination request."""
-
- limit: int
- """Number of checkpoints to retrieve."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py
deleted file mode 100644
index bf0af44d..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["CheckpointRetrieveResponse", "Data", "DataMetrics"]
-
-
-class DataMetrics(BaseModel):
- full_valid_loss: Optional[float] = None
-
- full_valid_mean_token_accuracy: Optional[float] = None
-
- step: Optional[float] = None
-
- train_loss: Optional[float] = None
-
- train_mean_token_accuracy: Optional[float] = None
-
- valid_loss: Optional[float] = None
-
- valid_mean_token_accuracy: Optional[float] = None
-
-
-class Data(BaseModel):
- id: str
- """The checkpoint identifier, which can be referenced in the API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the checkpoint was created."""
-
- fine_tuned_model_checkpoint: str
- """The name of the fine-tuned checkpoint model that is created."""
-
- fine_tuning_job_id: str
- """The name of the fine-tuning job that this checkpoint was created from."""
-
- metrics: DataMetrics
- """Metrics at the step number during the fine-tuning job."""
-
- object: Literal["fine_tuning.job.checkpoint"]
- """The object type, which is always "fine_tuning.job.checkpoint"."""
-
- step_number: int
- """The step number that the checkpoint was created at."""
-
-
-class CheckpointRetrieveResponse(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- object: Literal["list"]
-
- first_id: Optional[str] = None
-
- last_id: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py
deleted file mode 100644
index f0162e0e..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["EventRetrieveParams"]
-
-
-class EventRetrieveParams(TypedDict, total=False):
- after: str
- """Identifier for the last event from the previous pagination request."""
-
- limit: int
- """Number of events to retrieve."""
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py
deleted file mode 100644
index 8c22fe30..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import builtins
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["EventRetrieveResponse", "Data"]
-
-
-class Data(BaseModel):
- id: str
- """The object identifier."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
-
- level: Literal["info", "warn", "error"]
- """The log level of the event."""
-
- message: str
- """The message of the event."""
-
- object: Literal["fine_tuning.job.event"]
- """The object type, which is always "fine_tuning.job.event"."""
-
- data: Optional[builtins.object] = None
- """The data associated with the event."""
-
- type: Optional[Literal["message", "metrics"]] = None
- """The type of event."""
-
-
-class EventRetrieveResponse(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/function_object.py b/src/digitalocean_genai_sdk/types/function_object.py
deleted file mode 100644
index 4fe27f86..00000000
--- a/src/digitalocean_genai_sdk/types/function_object.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-
-from .._models import BaseModel
-
-__all__ = ["FunctionObject"]
-
-
-class FunctionObject(BaseModel):
- name: str
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: Optional[str] = None
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Optional[Dict[str, object]] = None
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](/docs/guides/function-calling) for examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
- strict: Optional[bool] = None
- """Whether to enable strict schema adherence when generating the function call.
-
- If set to true, the model will follow the exact schema defined in the
- `parameters` field. Only a subset of JSON Schema is supported when `strict` is
- `true`. Learn more about Structured Outputs in the
- [function calling guide](docs/guides/function-calling).
- """
diff --git a/src/digitalocean_genai_sdk/types/function_object_param.py b/src/digitalocean_genai_sdk/types/function_object_param.py
deleted file mode 100644
index 1a358408..00000000
--- a/src/digitalocean_genai_sdk/types/function_object_param.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["FunctionObjectParam"]
-
-
-class FunctionObjectParam(TypedDict, total=False):
- name: Required[str]
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: str
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Dict[str, object]
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](/docs/guides/function-calling) for examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
- strict: Optional[bool]
- """Whether to enable strict schema adherence when generating the function call.
-
- If set to true, the model will follow the exact schema defined in the
- `parameters` field. Only a subset of JSON Schema is supported when `strict` is
- `true`. Learn more about Structured Outputs in the
- [function calling guide](docs/guides/function-calling).
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call.py b/src/digitalocean_genai_sdk/types/function_tool_call.py
deleted file mode 100644
index ecdb4a02..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FunctionToolCall"]
-
-
-class FunctionToolCall(BaseModel):
- arguments: str
- """A JSON string of the arguments to pass to the function."""
-
- call_id: str
- """The unique ID of the function tool call generated by the model."""
-
- name: str
- """The name of the function to run."""
-
- type: Literal["function_call"]
- """The type of the function tool call. Always `function_call`."""
-
- id: Optional[str] = None
- """The unique ID of the function tool call."""
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output.py b/src/digitalocean_genai_sdk/types/function_tool_call_output.py
deleted file mode 100644
index 4cbe27ce..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call_output.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FunctionToolCallOutput"]
-
-
-class FunctionToolCallOutput(BaseModel):
- call_id: str
- """The unique ID of the function tool call generated by the model."""
-
- output: str
- """A JSON string of the output of the function tool call."""
-
- type: Literal["function_call_output"]
- """The type of the function tool call output. Always `function_call_output`."""
-
- id: Optional[str] = None
- """The unique ID of the function tool call output.
-
- Populated when this item is returned via API.
- """
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py
deleted file mode 100644
index 49a573ed..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FunctionToolCallOutputParam"]
-
-
-class FunctionToolCallOutputParam(TypedDict, total=False):
- call_id: Required[str]
- """The unique ID of the function tool call generated by the model."""
-
- output: Required[str]
- """A JSON string of the output of the function tool call."""
-
- type: Required[Literal["function_call_output"]]
- """The type of the function tool call output. Always `function_call_output`."""
-
- id: str
- """The unique ID of the function tool call output.
-
- Populated when this item is returned via API.
- """
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_param.py
deleted file mode 100644
index 91e076b6..00000000
--- a/src/digitalocean_genai_sdk/types/function_tool_call_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FunctionToolCallParam"]
-
-
-class FunctionToolCallParam(TypedDict, total=False):
- arguments: Required[str]
- """A JSON string of the arguments to pass to the function."""
-
- call_id: Required[str]
- """The unique ID of the function tool call generated by the model."""
-
- name: Required[str]
- """The name of the function to run."""
-
- type: Required[Literal["function_call"]]
- """The type of the function tool call. Always `function_call`."""
-
- id: str
- """The unique ID of the function tool call."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/image_create_edit_params.py b/src/digitalocean_genai_sdk/types/image_create_edit_params.py
deleted file mode 100644
index f84f5642..00000000
--- a/src/digitalocean_genai_sdk/types/image_create_edit_params.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["ImageCreateEditParams"]
-
-
-class ImageCreateEditParams(TypedDict, total=False):
- image: Required[FileTypes]
- """The image to edit.
-
- Must be a valid PNG file, less than 4MB, and square. If mask is not provided,
- image must have transparency, which will be used as the mask.
- """
-
- prompt: Required[str]
- """A text description of the desired image(s).
-
- The maximum length is 1000 characters.
- """
-
- mask: FileTypes
- """An additional image whose fully transparent areas (e.g.
-
- where alpha is zero) indicate where `image` should be edited. Must be a valid
- PNG file, less than 4MB, and have the same dimensions as `image`.
- """
-
- model: Union[str, Literal["dall-e-2"], None]
- """The model to use for image generation.
-
- Only `dall-e-2` is supported at this time.
- """
-
- n: Optional[int]
- """The number of images to generate. Must be between 1 and 10."""
-
- response_format: Optional[Literal["url", "b64_json"]]
- """The format in which the generated images are returned.
-
- Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
- """
-
- size: Optional[Literal["256x256", "512x512", "1024x1024"]]
- """The size of the generated images.
-
- Must be one of `256x256`, `512x512`, or `1024x1024`.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/image_create_generation_params.py b/src/digitalocean_genai_sdk/types/image_create_generation_params.py
deleted file mode 100644
index e8cfbb18..00000000
--- a/src/digitalocean_genai_sdk/types/image_create_generation_params.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ImageCreateGenerationParams"]
-
-
-class ImageCreateGenerationParams(TypedDict, total=False):
- prompt: Required[str]
- """A text description of the desired image(s).
-
- The maximum length is 1000 characters for `dall-e-2` and 4000 characters for
- `dall-e-3`.
- """
-
- model: Union[str, Literal["dall-e-2", "dall-e-3"], None]
- """The model to use for image generation."""
-
- n: Optional[int]
- """The number of images to generate.
-
- Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
- """
-
- quality: Literal["standard", "hd"]
- """The quality of the image that will be generated.
-
- `hd` creates images with finer details and greater consistency across the image.
- This param is only supported for `dall-e-3`.
- """
-
- response_format: Optional[Literal["url", "b64_json"]]
- """The format in which the generated images are returned.
-
- Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
- """
-
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
- """The size of the generated images.
-
- Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one
- of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
- """
-
- style: Optional[Literal["vivid", "natural"]]
- """The style of the generated images.
-
- Must be one of `vivid` or `natural`. Vivid causes the model to lean towards
- generating hyper-real and dramatic images. Natural causes the model to produce
- more natural, less hyper-real looking images. This param is only supported for
- `dall-e-3`.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/image_create_variation_params.py b/src/digitalocean_genai_sdk/types/image_create_variation_params.py
deleted file mode 100644
index 64245a05..00000000
--- a/src/digitalocean_genai_sdk/types/image_create_variation_params.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["ImageCreateVariationParams"]
-
-
-class ImageCreateVariationParams(TypedDict, total=False):
- image: Required[FileTypes]
- """The image to use as the basis for the variation(s).
-
- Must be a valid PNG file, less than 4MB, and square.
- """
-
- model: Union[str, Literal["dall-e-2"], None]
- """The model to use for image generation.
-
- Only `dall-e-2` is supported at this time.
- """
-
- n: Optional[int]
- """The number of images to generate.
-
- Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
- """
-
- response_format: Optional[Literal["url", "b64_json"]]
- """The format in which the generated images are returned.
-
- Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
- """
-
- size: Optional[Literal["256x256", "512x512", "1024x1024"]]
- """The size of the generated images.
-
- Must be one of `256x256`, `512x512`, or `1024x1024`.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/images_response.py b/src/digitalocean_genai_sdk/types/images_response.py
deleted file mode 100644
index 509e0069..00000000
--- a/src/digitalocean_genai_sdk/types/images_response.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from .._models import BaseModel
-
-__all__ = ["ImagesResponse", "Data"]
-
-
-class Data(BaseModel):
- b64_json: Optional[str] = None
- """
- The base64-encoded JSON of the generated image, if `response_format` is
- `b64_json`.
- """
-
- revised_prompt: Optional[str] = None
- """
- The prompt that was used to generate the image, if there was any revision to the
- prompt.
- """
-
- url: Optional[str] = None
- """The URL of the generated image, if `response_format` is `url` (default)."""
-
-
-class ImagesResponse(BaseModel):
- created: int
-
- data: List[Data]
diff --git a/src/digitalocean_genai_sdk/types/includable.py b/src/digitalocean_genai_sdk/types/includable.py
deleted file mode 100644
index 8b4920a2..00000000
--- a/src/digitalocean_genai_sdk/types/includable.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["Includable"]
-
-Includable: TypeAlias = Literal[
- "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url"
-]
diff --git a/src/digitalocean_genai_sdk/types/input_content.py b/src/digitalocean_genai_sdk/types/input_content.py
deleted file mode 100644
index 04e37845..00000000
--- a/src/digitalocean_genai_sdk/types/input_content.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-
-__all__ = ["InputContent", "InputText", "InputImage", "InputFile"]
-
-
-class InputText(BaseModel):
- text: str
- """The text input to the model."""
-
- type: Literal["input_text"]
- """The type of the input item. Always `input_text`."""
-
-
-class InputImage(BaseModel):
- detail: Literal["high", "low", "auto"]
- """The detail level of the image to be sent to the model.
-
- One of `high`, `low`, or `auto`. Defaults to `auto`.
- """
-
- type: Literal["input_image"]
- """The type of the input item. Always `input_image`."""
-
- file_id: Optional[str] = None
- """The ID of the file to be sent to the model."""
-
- image_url: Optional[str] = None
- """The URL of the image to be sent to the model.
-
- A fully qualified URL or base64 encoded image in a data URL.
- """
-
-
-class InputFile(BaseModel):
- type: Literal["input_file"]
- """The type of the input item. Always `input_file`."""
-
- file_data: Optional[str] = None
- """The content of the file to be sent to the model."""
-
- file_id: Optional[str] = None
- """The ID of the file to be sent to the model."""
-
- filename: Optional[str] = None
- """The name of the file to be sent to the model."""
-
-
-InputContent: TypeAlias = Union[InputText, InputImage, InputFile]
diff --git a/src/digitalocean_genai_sdk/types/input_content_param.py b/src/digitalocean_genai_sdk/types/input_content_param.py
deleted file mode 100644
index ed0bdf62..00000000
--- a/src/digitalocean_genai_sdk/types/input_content_param.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = ["InputContentParam", "InputText", "InputImage", "InputFile"]
-
-
-class InputText(TypedDict, total=False):
- text: Required[str]
- """The text input to the model."""
-
- type: Required[Literal["input_text"]]
- """The type of the input item. Always `input_text`."""
-
-
-class InputImage(TypedDict, total=False):
- detail: Required[Literal["high", "low", "auto"]]
- """The detail level of the image to be sent to the model.
-
- One of `high`, `low`, or `auto`. Defaults to `auto`.
- """
-
- type: Required[Literal["input_image"]]
- """The type of the input item. Always `input_image`."""
-
- file_id: Optional[str]
- """The ID of the file to be sent to the model."""
-
- image_url: Optional[str]
- """The URL of the image to be sent to the model.
-
- A fully qualified URL or base64 encoded image in a data URL.
- """
-
-
-class InputFile(TypedDict, total=False):
- type: Required[Literal["input_file"]]
- """The type of the input item. Always `input_file`."""
-
- file_data: str
- """The content of the file to be sent to the model."""
-
- file_id: str
- """The ID of the file to be sent to the model."""
-
- filename: str
- """The name of the file to be sent to the model."""
-
-
-InputContentParam: TypeAlias = Union[InputText, InputImage, InputFile]
diff --git a/src/digitalocean_genai_sdk/types/input_message.py b/src/digitalocean_genai_sdk/types/input_message.py
deleted file mode 100644
index 4dc5526f..00000000
--- a/src/digitalocean_genai_sdk/types/input_message.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .input_content import InputContent
-
-__all__ = ["InputMessage"]
-
-
-class InputMessage(BaseModel):
- content: List[InputContent]
- """
- A list of one or many input items to the model, containing different content
- types.
- """
-
- role: Literal["user", "system", "developer"]
- """The role of the message input. One of `user`, `system`, or `developer`."""
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Optional[Literal["message"]] = None
- """The type of the message input. Always set to `message`."""
diff --git a/src/digitalocean_genai_sdk/types/input_message_param.py b/src/digitalocean_genai_sdk/types/input_message_param.py
deleted file mode 100644
index 388c54ca..00000000
--- a/src/digitalocean_genai_sdk/types/input_message_param.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-from .input_content_param import InputContentParam
-
-__all__ = ["InputMessageParam"]
-
-
-class InputMessageParam(TypedDict, total=False):
- content: Required[Iterable[InputContentParam]]
- """
- A list of one or many input items to the model, containing different content
- types.
- """
-
- role: Required[Literal["user", "system", "developer"]]
- """The role of the message input. One of `user`, `system`, or `developer`."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
-
- type: Literal["message"]
- """The type of the message input. Always set to `message`."""
diff --git a/src/digitalocean_genai_sdk/types/model.py b/src/digitalocean_genai_sdk/types/model.py
deleted file mode 100644
index 2631ee8d..00000000
--- a/src/digitalocean_genai_sdk/types/model.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["Model"]
-
-
-class Model(BaseModel):
- id: str
- """The model identifier, which can be referenced in the API endpoints."""
-
- created: int
- """The Unix timestamp (in seconds) when the model was created."""
-
- object: Literal["model"]
- """The object type, which is always "model"."""
-
- owned_by: str
- """The organization that owns the model."""
diff --git a/src/digitalocean_genai_sdk/types/model_delete_response.py b/src/digitalocean_genai_sdk/types/model_delete_response.py
deleted file mode 100644
index 63b2d296..00000000
--- a/src/digitalocean_genai_sdk/types/model_delete_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["ModelDeleteResponse"]
-
-
-class ModelDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/model_list_response.py b/src/digitalocean_genai_sdk/types/model_list_response.py
deleted file mode 100644
index 8f835449..00000000
--- a/src/digitalocean_genai_sdk/types/model_list_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .model import Model
-from .._models import BaseModel
-
-__all__ = ["ModelListResponse"]
-
-
-class ModelListResponse(BaseModel):
- data: List[Model]
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/model_response_properties.py b/src/digitalocean_genai_sdk/types/model_response_properties.py
deleted file mode 100644
index 547c6391..00000000
--- a/src/digitalocean_genai_sdk/types/model_response_properties.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-
-from .._models import BaseModel
-
-__all__ = ["ModelResponseProperties"]
-
-
-class ModelResponseProperties(BaseModel):
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- temperature: Optional[float] = None
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. We generally recommend altering
- this or `top_p` but not both.
- """
-
- top_p: Optional[float] = None
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
-
- user: Optional[str] = None
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_params.py b/src/digitalocean_genai_sdk/types/moderation_classify_params.py
deleted file mode 100644
index bcc99a1e..00000000
--- a/src/digitalocean_genai_sdk/types/moderation_classify_params.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = [
- "ModerationClassifyParams",
- "InputUnionMember2",
- "InputUnionMember2UnionMember0",
- "InputUnionMember2UnionMember0ImageURL",
- "InputUnionMember2UnionMember1",
-]
-
-
-class ModerationClassifyParams(TypedDict, total=False):
- input: Required[Union[str, List[str], Iterable[InputUnionMember2]]]
- """Input (or inputs) to classify.
-
- Can be a single string, an array of strings, or an array of multi-modal input
- objects similar to other models.
- """
-
- model: Union[
- str,
- Literal[
- "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable"
- ],
- ]
- """The content moderation model you would like to use.
-
- Learn more in [the moderation guide](/docs/guides/moderation), and learn about
- available models [here](/docs/models#moderation).
- """
-
-
-class InputUnionMember2UnionMember0ImageURL(TypedDict, total=False):
- url: Required[str]
- """Either a URL of the image or the base64 encoded image data."""
-
-
-class InputUnionMember2UnionMember0(TypedDict, total=False):
- image_url: Required[InputUnionMember2UnionMember0ImageURL]
- """Contains either an image URL or a data URL for a base64 encoded image."""
-
- type: Required[Literal["image_url"]]
- """Always `image_url`."""
-
-
-class InputUnionMember2UnionMember1(TypedDict, total=False):
- text: Required[str]
- """A string of text to classify."""
-
- type: Required[Literal["text"]]
- """Always `text`."""
-
-
-InputUnionMember2: TypeAlias = Union[InputUnionMember2UnionMember0, InputUnionMember2UnionMember1]
diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_response.py b/src/digitalocean_genai_sdk/types/moderation_classify_response.py
deleted file mode 100644
index cfda7318..00000000
--- a/src/digitalocean_genai_sdk/types/moderation_classify_response.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-
-__all__ = [
- "ModerationClassifyResponse",
- "Result",
- "ResultCategories",
- "ResultCategoryAppliedInputTypes",
- "ResultCategoryScores",
-]
-
-
-class ResultCategories(BaseModel):
- harassment: bool
- """
- Content that expresses, incites, or promotes harassing language towards any
- target.
- """
-
- harassment_threatening: bool = FieldInfo(alias="harassment/threatening")
- """
- Harassment content that also includes violence or serious harm towards any
- target.
- """
-
- hate: bool
- """
- Content that expresses, incites, or promotes hate based on race, gender,
- ethnicity, religion, nationality, sexual orientation, disability status, or
- caste. Hateful content aimed at non-protected groups (e.g., chess players) is
- harassment.
- """
-
- hate_threatening: bool = FieldInfo(alias="hate/threatening")
- """
- Hateful content that also includes violence or serious harm towards the targeted
- group based on race, gender, ethnicity, religion, nationality, sexual
- orientation, disability status, or caste.
- """
-
- illicit: Optional[bool] = None
- """
- Content that includes instructions or advice that facilitate the planning or
- execution of wrongdoing, or that gives advice or instruction on how to commit
- illicit acts. For example, "how to shoplift" would fit this category.
- """
-
- illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None)
- """
- Content that includes instructions or advice that facilitate the planning or
- execution of wrongdoing that also includes violence, or that gives advice or
- instruction on the procurement of any weapon.
- """
-
- self_harm: bool = FieldInfo(alias="self-harm")
- """
- Content that promotes, encourages, or depicts acts of self-harm, such as
- suicide, cutting, and eating disorders.
- """
-
- self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions")
- """
- Content that encourages performing acts of self-harm, such as suicide, cutting,
- and eating disorders, or that gives instructions or advice on how to commit such
- acts.
- """
-
- self_harm_intent: bool = FieldInfo(alias="self-harm/intent")
- """
- Content where the speaker expresses that they are engaging or intend to engage
- in acts of self-harm, such as suicide, cutting, and eating disorders.
- """
-
- sexual: bool
- """
- Content meant to arouse sexual excitement, such as the description of sexual
- activity, or that promotes sexual services (excluding sex education and
- wellness).
- """
-
- sexual_minors: bool = FieldInfo(alias="sexual/minors")
- """Sexual content that includes an individual who is under 18 years old."""
-
- violence: bool
- """Content that depicts death, violence, or physical injury."""
-
- violence_graphic: bool = FieldInfo(alias="violence/graphic")
- """Content that depicts death, violence, or physical injury in graphic detail."""
-
-
-class ResultCategoryAppliedInputTypes(BaseModel):
- harassment: List[Literal["text"]]
- """The applied input type(s) for the category 'harassment'."""
-
- harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening")
- """The applied input type(s) for the category 'harassment/threatening'."""
-
- hate: List[Literal["text"]]
- """The applied input type(s) for the category 'hate'."""
-
- hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening")
- """The applied input type(s) for the category 'hate/threatening'."""
-
- illicit: List[Literal["text"]]
- """The applied input type(s) for the category 'illicit'."""
-
- illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent")
- """The applied input type(s) for the category 'illicit/violent'."""
-
- self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm")
- """The applied input type(s) for the category 'self-harm'."""
-
- self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions")
- """The applied input type(s) for the category 'self-harm/instructions'."""
-
- self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent")
- """The applied input type(s) for the category 'self-harm/intent'."""
-
- sexual: List[Literal["text", "image"]]
- """The applied input type(s) for the category 'sexual'."""
-
- sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors")
- """The applied input type(s) for the category 'sexual/minors'."""
-
- violence: List[Literal["text", "image"]]
- """The applied input type(s) for the category 'violence'."""
-
- violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic")
- """The applied input type(s) for the category 'violence/graphic'."""
-
-
-class ResultCategoryScores(BaseModel):
- harassment: float
- """The score for the category 'harassment'."""
-
- harassment_threatening: float = FieldInfo(alias="harassment/threatening")
- """The score for the category 'harassment/threatening'."""
-
- hate: float
- """The score for the category 'hate'."""
-
- hate_threatening: float = FieldInfo(alias="hate/threatening")
- """The score for the category 'hate/threatening'."""
-
- illicit: float
- """The score for the category 'illicit'."""
-
- illicit_violent: float = FieldInfo(alias="illicit/violent")
- """The score for the category 'illicit/violent'."""
-
- self_harm: float = FieldInfo(alias="self-harm")
- """The score for the category 'self-harm'."""
-
- self_harm_instructions: float = FieldInfo(alias="self-harm/instructions")
- """The score for the category 'self-harm/instructions'."""
-
- self_harm_intent: float = FieldInfo(alias="self-harm/intent")
- """The score for the category 'self-harm/intent'."""
-
- sexual: float
- """The score for the category 'sexual'."""
-
- sexual_minors: float = FieldInfo(alias="sexual/minors")
- """The score for the category 'sexual/minors'."""
-
- violence: float
- """The score for the category 'violence'."""
-
- violence_graphic: float = FieldInfo(alias="violence/graphic")
- """The score for the category 'violence/graphic'."""
-
-
-class Result(BaseModel):
- categories: ResultCategories
- """A list of the categories, and whether they are flagged or not."""
-
- category_applied_input_types: ResultCategoryAppliedInputTypes
- """
- A list of the categories along with the input type(s) that the score applies to.
- """
-
- category_scores: ResultCategoryScores
- """A list of the categories along with their scores as predicted by model."""
-
- flagged: bool
- """Whether any of the below categories are flagged."""
-
-
-class ModerationClassifyResponse(BaseModel):
- id: str
- """The unique identifier for the moderation request."""
-
- model: str
- """The model used to generate the moderation results."""
-
- results: List[Result]
- """A list of moderation objects."""
diff --git a/src/digitalocean_genai_sdk/types/openai_file.py b/src/digitalocean_genai_sdk/types/openai_file.py
deleted file mode 100644
index a8398a35..00000000
--- a/src/digitalocean_genai_sdk/types/openai_file.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["OpenAIFile"]
-
-
-class OpenAIFile(BaseModel):
- id: str
- """The file identifier, which can be referenced in the API endpoints."""
-
- bytes: int
- """The size of the file, in bytes."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the file was created."""
-
- filename: str
- """The name of the file."""
-
- object: Literal["file"]
- """The object type, which is always `file`."""
-
- purpose: Literal[
- "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision"
- ]
- """The intended purpose of the file.
-
- Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`,
- `fine-tune`, `fine-tune-results` and `vision`.
- """
-
- status: Literal["uploaded", "processed", "error"]
- """Deprecated.
-
- The current status of the file, which can be either `uploaded`, `processed`, or
- `error`.
- """
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the file will expire."""
-
- status_details: Optional[str] = None
- """Deprecated.
-
- For details on why a fine-tuning training file failed validation, see the
- `error` field on `fine_tuning.job`.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/__init__.py b/src/digitalocean_genai_sdk/types/organization/__init__.py
deleted file mode 100644
index 5b34f495..00000000
--- a/src/digitalocean_genai_sdk/types/organization/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .invite import Invite as Invite
-from .project import Project as Project
-from .admin_api_key import AdminAPIKey as AdminAPIKey
-from .user_list_params import UserListParams as UserListParams
-from .organization_user import OrganizationUser as OrganizationUser
-from .invite_list_params import InviteListParams as InviteListParams
-from .user_list_response import UserListResponse as UserListResponse
-from .user_update_params import UserUpdateParams as UserUpdateParams
-from .project_list_params import ProjectListParams as ProjectListParams
-from .usage_images_params import UsageImagesParams as UsageImagesParams
-from .invite_create_params import InviteCreateParams as InviteCreateParams
-from .invite_list_response import InviteListResponse as InviteListResponse
-from .user_delete_response import UserDeleteResponse as UserDeleteResponse
-from .project_create_params import ProjectCreateParams as ProjectCreateParams
-from .project_list_response import ProjectListResponse as ProjectListResponse
-from .project_update_params import ProjectUpdateParams as ProjectUpdateParams
-from .invite_delete_response import InviteDeleteResponse as InviteDeleteResponse
-from .usage_embeddings_params import UsageEmbeddingsParams as UsageEmbeddingsParams
-from .usage_completions_params import UsageCompletionsParams as UsageCompletionsParams
-from .usage_moderations_params import UsageModerationsParams as UsageModerationsParams
-from .admin_api_key_list_params import AdminAPIKeyListParams as AdminAPIKeyListParams
-from .usage_vector_stores_params import UsageVectorStoresParams as UsageVectorStoresParams
-from .admin_api_key_create_params import AdminAPIKeyCreateParams as AdminAPIKeyCreateParams
-from .admin_api_key_list_response import AdminAPIKeyListResponse as AdminAPIKeyListResponse
-from .usage_audio_speeches_params import UsageAudioSpeechesParams as UsageAudioSpeechesParams
-from .admin_api_key_delete_response import AdminAPIKeyDeleteResponse as AdminAPIKeyDeleteResponse
-from .usage_audio_transcriptions_params import UsageAudioTranscriptionsParams as UsageAudioTranscriptionsParams
-from .usage_code_interpreter_sessions_params import (
- UsageCodeInterpreterSessionsParams as UsageCodeInterpreterSessionsParams,
-)
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key.py
deleted file mode 100644
index 8a57458f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ..._models import BaseModel
-
-__all__ = ["AdminAPIKey", "Owner"]
-
-
-class Owner(BaseModel):
- id: Optional[str] = None
-
- created_at: Optional[int] = None
-
- name: Optional[str] = None
-
- role: Optional[str] = None
-
- type: Optional[str] = None
-
-
-class AdminAPIKey(BaseModel):
- id: Optional[str] = None
-
- created_at: Optional[int] = None
-
- name: Optional[str] = None
-
- object: Optional[str] = None
-
- owner: Optional[Owner] = None
-
- redacted_value: Optional[str] = None
-
- value: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py
deleted file mode 100644
index dccdfb8a..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["AdminAPIKeyCreateParams"]
-
-
-class AdminAPIKeyCreateParams(TypedDict, total=False):
- name: Required[str]
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py
deleted file mode 100644
index b752558c..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ..._models import BaseModel
-
-__all__ = ["AdminAPIKeyDeleteResponse"]
-
-
-class AdminAPIKeyDeleteResponse(BaseModel):
- id: Optional[str] = None
-
- deleted: Optional[bool] = None
-
- object: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py
deleted file mode 100644
index c3b3f510..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["AdminAPIKeyListParams"]
-
-
-class AdminAPIKeyListParams(TypedDict, total=False):
- after: Optional[str]
- """Return keys with IDs that come after this ID in the pagination order."""
-
- limit: int
- """Maximum number of keys to return."""
-
- order: Literal["asc", "desc"]
- """Order results by creation time, ascending or descending."""
diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py
deleted file mode 100644
index 8ef9beb7..00000000
--- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ..._models import BaseModel
-from .admin_api_key import AdminAPIKey
-
-__all__ = ["AdminAPIKeyListResponse"]
-
-
-class AdminAPIKeyListResponse(BaseModel):
- data: Optional[List[AdminAPIKey]] = None
-
- first_id: Optional[str] = None
-
- has_more: Optional[bool] = None
-
- last_id: Optional[str] = None
-
- object: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/organization/invite.py b/src/digitalocean_genai_sdk/types/organization/invite.py
deleted file mode 100644
index fd495caf..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["Invite", "Project"]
-
-
-class Project(BaseModel):
- id: Optional[str] = None
- """Project's public ID"""
-
- role: Optional[Literal["member", "owner"]] = None
- """Project membership role"""
-
-
-class Invite(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- email: str
- """The email address of the individual to whom the invite was sent"""
-
- expires_at: int
- """The Unix timestamp (in seconds) of when the invite expires."""
-
- invited_at: int
- """The Unix timestamp (in seconds) of when the invite was sent."""
-
- object: Literal["organization.invite"]
- """The object type, which is always `organization.invite`"""
-
- role: Literal["owner", "reader"]
- """`owner` or `reader`"""
-
- status: Literal["accepted", "expired", "pending"]
- """`accepted`,`expired`, or `pending`"""
-
- accepted_at: Optional[int] = None
- """The Unix timestamp (in seconds) of when the invite was accepted."""
-
- projects: Optional[List[Project]] = None
- """The projects that were granted membership upon acceptance of the invite."""
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_create_params.py b/src/digitalocean_genai_sdk/types/organization/invite_create_params.py
deleted file mode 100644
index 7709003f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_create_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["InviteCreateParams", "Project"]
-
-
-class InviteCreateParams(TypedDict, total=False):
- email: Required[str]
- """Send an email to this address"""
-
- role: Required[Literal["reader", "owner"]]
- """`owner` or `reader`"""
-
- projects: Iterable[Project]
- """
- An array of projects to which membership is granted at the same time the org
- invite is accepted. If omitted, the user will be invited to the default project
- for compatibility with legacy behavior.
- """
-
-
-class Project(TypedDict, total=False):
- id: Required[str]
- """Project's public ID"""
-
- role: Required[Literal["member", "owner"]]
- """Project membership role"""
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py b/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py
deleted file mode 100644
index 52bd47b9..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["InviteDeleteResponse"]
-
-
-class InviteDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.invite.deleted"]
- """The object type, which is always `organization.invite.deleted`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_params.py b/src/digitalocean_genai_sdk/types/organization/invite_list_params.py
deleted file mode 100644
index 678510d6..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["InviteListParams"]
-
-
-class InviteListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_response.py b/src/digitalocean_genai_sdk/types/organization/invite_list_response.py
deleted file mode 100644
index 2b646289..00000000
--- a/src/digitalocean_genai_sdk/types/organization/invite_list_response.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .invite import Invite
-from ..._models import BaseModel
-
-__all__ = ["InviteListResponse"]
-
-
-class InviteListResponse(BaseModel):
- data: List[Invite]
-
- object: Literal["list"]
- """The object type, which is always `list`"""
-
- first_id: Optional[str] = None
- """The first `invite_id` in the retrieved `list`"""
-
- has_more: Optional[bool] = None
- """
- The `has_more` property is used for pagination to indicate there are additional
- results.
- """
-
- last_id: Optional[str] = None
- """The last `invite_id` in the retrieved `list`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/organization_user.py b/src/digitalocean_genai_sdk/types/organization/organization_user.py
deleted file mode 100644
index 890833f1..00000000
--- a/src/digitalocean_genai_sdk/types/organization/organization_user.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["OrganizationUser"]
-
-
-class OrganizationUser(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- added_at: int
- """The Unix timestamp (in seconds) of when the user was added."""
-
- email: str
- """The email address of the user"""
-
- name: str
- """The name of the user"""
-
- object: Literal["organization.user"]
- """The object type, which is always `organization.user`"""
-
- role: Literal["owner", "reader"]
- """`owner` or `reader`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/project.py b/src/digitalocean_genai_sdk/types/organization/project.py
deleted file mode 100644
index 731e8609..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["Project"]
-
-
-class Project(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- created_at: int
- """The Unix timestamp (in seconds) of when the project was created."""
-
- name: str
- """The name of the project. This appears in reporting."""
-
- object: Literal["organization.project"]
- """The object type, which is always `organization.project`"""
-
- status: Literal["active", "archived"]
- """`active` or `archived`"""
-
- archived_at: Optional[int] = None
- """The Unix timestamp (in seconds) of when the project was archived or `null`."""
diff --git a/src/digitalocean_genai_sdk/types/organization/project_create_params.py b/src/digitalocean_genai_sdk/types/organization/project_create_params.py
deleted file mode 100644
index 0c18bc5b..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_create_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ProjectCreateParams"]
-
-
-class ProjectCreateParams(TypedDict, total=False):
- name: Required[str]
- """The friendly name of the project, this name appears in reports."""
diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_params.py b/src/digitalocean_genai_sdk/types/organization/project_list_params.py
deleted file mode 100644
index f55fb8a3..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_list_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["ProjectListParams"]
-
-
-class ProjectListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- include_archived: bool
- """If `true` returns all projects including those that have been `archived`.
-
- Archived projects are not included by default.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_response.py b/src/digitalocean_genai_sdk/types/organization/project_list_response.py
deleted file mode 100644
index 24a79f63..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .project import Project
-from ..._models import BaseModel
-
-__all__ = ["ProjectListResponse"]
-
-
-class ProjectListResponse(BaseModel):
- data: List[Project]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/project_update_params.py b/src/digitalocean_genai_sdk/types/organization/project_update_params.py
deleted file mode 100644
index 0ba1984a..00000000
--- a/src/digitalocean_genai_sdk/types/organization/project_update_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ProjectUpdateParams"]
-
-
-class ProjectUpdateParams(TypedDict, total=False):
- name: Required[str]
- """The updated name of the project, this name appears in reports."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py b/src/digitalocean_genai_sdk/types/organization/projects/__init__.py
deleted file mode 100644
index 4b0e0f9b..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .api_key import APIKey as APIKey
-from .rate_limit import RateLimit as RateLimit
-from .project_user import ProjectUser as ProjectUser
-from .service_account import ServiceAccount as ServiceAccount
-from .user_add_params import UserAddParams as UserAddParams
-from .user_list_params import UserListParams as UserListParams
-from .user_list_response import UserListResponse as UserListResponse
-from .user_update_params import UserUpdateParams as UserUpdateParams
-from .api_key_list_params import APIKeyListParams as APIKeyListParams
-from .user_delete_response import UserDeleteResponse as UserDeleteResponse
-from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
-from .rate_limit_list_params import RateLimitListParams as RateLimitListParams
-from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse
-from .rate_limit_list_response import RateLimitListResponse as RateLimitListResponse
-from .rate_limit_update_params import RateLimitUpdateParams as RateLimitUpdateParams
-from .service_account_list_params import ServiceAccountListParams as ServiceAccountListParams
-from .service_account_create_params import ServiceAccountCreateParams as ServiceAccountCreateParams
-from .service_account_list_response import ServiceAccountListResponse as ServiceAccountListResponse
-from .service_account_create_response import ServiceAccountCreateResponse as ServiceAccountCreateResponse
-from .service_account_delete_response import ServiceAccountDeleteResponse as ServiceAccountDeleteResponse
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key.py
deleted file mode 100644
index 276f6d9b..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-from .project_user import ProjectUser
-from .service_account import ServiceAccount
-
-__all__ = ["APIKey", "Owner"]
-
-
-class Owner(BaseModel):
- service_account: Optional[ServiceAccount] = None
- """Represents an individual service account in a project."""
-
- type: Optional[Literal["user", "service_account"]] = None
- """`user` or `service_account`"""
-
- user: Optional[ProjectUser] = None
- """Represents an individual user in a project."""
-
-
-class APIKey(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- created_at: int
- """The Unix timestamp (in seconds) of when the API key was created"""
-
- name: str
- """The name of the API key"""
-
- object: Literal["organization.project.api_key"]
- """The object type, which is always `organization.project.api_key`"""
-
- owner: Owner
-
- redacted_value: str
- """The redacted value of the API key"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py
deleted file mode 100644
index c3ec64bd..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["APIKeyDeleteResponse"]
-
-
-class APIKeyDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.project.api_key.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py
deleted file mode 100644
index 422a2851..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["APIKeyListParams"]
-
-
-class APIKeyListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py
deleted file mode 100644
index 669de6c6..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .api_key import APIKey
-from ...._models import BaseModel
-
-__all__ = ["APIKeyListResponse"]
-
-
-class APIKeyListResponse(BaseModel):
- data: List[APIKey]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/project_user.py b/src/digitalocean_genai_sdk/types/organization/projects/project_user.py
deleted file mode 100644
index afcdb514..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/project_user.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ProjectUser"]
-
-
-class ProjectUser(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- added_at: int
- """The Unix timestamp (in seconds) of when the project was added."""
-
- email: str
- """The email address of the user"""
-
- name: str
- """The name of the user"""
-
- object: Literal["organization.project.user"]
- """The object type, which is always `organization.project.user`"""
-
- role: Literal["owner", "member"]
- """`owner` or `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py
deleted file mode 100644
index 1a9795f5..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["RateLimit"]
-
-
-class RateLimit(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- max_requests_per_1_minute: int
- """The maximum requests per minute."""
-
- max_tokens_per_1_minute: int
- """The maximum tokens per minute."""
-
- model: str
- """The model this rate limit applies to."""
-
- object: Literal["project.rate_limit"]
- """The object type, which is always `project.rate_limit`"""
-
- batch_1_day_max_input_tokens: Optional[int] = None
- """The maximum batch input tokens per day. Only present for relevant models."""
-
- max_audio_megabytes_per_1_minute: Optional[int] = None
- """The maximum audio megabytes per minute. Only present for relevant models."""
-
- max_images_per_1_minute: Optional[int] = None
- """The maximum images per minute. Only present for relevant models."""
-
- max_requests_per_1_day: Optional[int] = None
- """The maximum requests per day. Only present for relevant models."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py
deleted file mode 100644
index aa007e5f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["RateLimitListParams"]
-
-
-class RateLimitListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, beginning with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned. The default is 100."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py
deleted file mode 100644
index f2133f3e..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-from .rate_limit import RateLimit
-
-__all__ = ["RateLimitListResponse"]
-
-
-class RateLimitListResponse(BaseModel):
- data: List[RateLimit]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py
deleted file mode 100644
index a303d6f4..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["RateLimitUpdateParams"]
-
-
-class RateLimitUpdateParams(TypedDict, total=False):
- project_id: Required[str]
-
- batch_1_day_max_input_tokens: int
- """The maximum batch input tokens per day. Only relevant for certain models."""
-
- max_audio_megabytes_per_1_minute: int
- """The maximum audio megabytes per minute. Only relevant for certain models."""
-
- max_images_per_1_minute: int
- """The maximum images per minute. Only relevant for certain models."""
-
- max_requests_per_1_day: int
- """The maximum requests per day. Only relevant for certain models."""
-
- max_requests_per_1_minute: int
- """The maximum requests per minute."""
-
- max_tokens_per_1_minute: int
- """The maximum tokens per minute."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account.py
deleted file mode 100644
index 9200ba11..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ServiceAccount"]
-
-
-class ServiceAccount(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints"""
-
- created_at: int
- """The Unix timestamp (in seconds) of when the service account was created"""
-
- name: str
- """The name of the service account"""
-
- object: Literal["organization.project.service_account"]
- """The object type, which is always `organization.project.service_account`"""
-
- role: Literal["owner", "member"]
- """`owner` or `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py
deleted file mode 100644
index 409dcba5..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ServiceAccountCreateParams"]
-
-
-class ServiceAccountCreateParams(TypedDict, total=False):
- name: Required[str]
- """The name of the service account being created."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py
deleted file mode 100644
index e7757a8a..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ServiceAccountCreateResponse", "APIKey"]
-
-
-class APIKey(BaseModel):
- id: str
-
- created_at: int
-
- name: str
-
- object: Literal["organization.project.service_account.api_key"]
- """The object type, which is always `organization.project.service_account.api_key`"""
-
- value: str
-
-
-class ServiceAccountCreateResponse(BaseModel):
- id: str
-
- api_key: APIKey
-
- created_at: int
-
- name: str
-
- object: Literal["organization.project.service_account"]
-
- role: Literal["member"]
- """Service accounts can only have one role of type `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py
deleted file mode 100644
index 28d04e10..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["ServiceAccountDeleteResponse"]
-
-
-class ServiceAccountDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.project.service_account.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py
deleted file mode 100644
index 7f808e28..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["ServiceAccountListParams"]
-
-
-class ServiceAccountListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py
deleted file mode 100644
index 0818c8c8..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-from .service_account import ServiceAccount
-
-__all__ = ["ServiceAccountListResponse"]
-
-
-class ServiceAccountListResponse(BaseModel):
- data: List[ServiceAccount]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py
deleted file mode 100644
index 85f38c0c..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UserAddParams"]
-
-
-class UserAddParams(TypedDict, total=False):
- role: Required[Literal["owner", "member"]]
- """`owner` or `member`"""
-
- user_id: Required[str]
- """The ID of the user."""
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py
deleted file mode 100644
index 7ac68cc5..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["UserDeleteResponse"]
-
-
-class UserDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.project.user.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py
deleted file mode 100644
index d561e907..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["UserListParams"]
-
-
-class UserListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py
deleted file mode 100644
index 1f8993ad..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ...._models import BaseModel
-from .project_user import ProjectUser
-
-__all__ = ["UserListResponse"]
-
-
-class UserListResponse(BaseModel):
- data: List[ProjectUser]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py
deleted file mode 100644
index 08b3e1a4..00000000
--- a/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UserUpdateParams"]
-
-
-class UserUpdateParams(TypedDict, total=False):
- project_id: Required[str]
-
- role: Required[Literal["owner", "member"]]
- """`owner` or `member`"""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py
deleted file mode 100644
index 819ffc37..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageAudioSpeechesParams"]
-
-
-class UsageAudioSpeechesParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py
deleted file mode 100644
index 318f85a3..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageAudioTranscriptionsParams"]
-
-
-class UsageAudioTranscriptionsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py
deleted file mode 100644
index 24322abe..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageCodeInterpreterSessionsParams"]
-
-
-class UsageCodeInterpreterSessionsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py
deleted file mode 100644
index 8bd94d39..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageCompletionsParams"]
-
-
-class UsageCompletionsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- batch: bool
- """If `true`, return batch jobs only.
-
- If `false`, return non-batch jobs only. By default, return both.
- """
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `batch`
- or any combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py b/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py
deleted file mode 100644
index c4a71264..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageEmbeddingsParams"]
-
-
-class UsageEmbeddingsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_images_params.py b/src/digitalocean_genai_sdk/types/organization/usage_images_params.py
deleted file mode 100644
index 31f2a31f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_images_params.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageImagesParams"]
-
-
-class UsageImagesParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `size`,
- `source` or any combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]]
- """Return only usages for these image sizes.
-
- Possible values are `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792`
- or any combination of them.
- """
-
- sources: List[Literal["image.generation", "image.edit", "image.variation"]]
- """Return only usages for these sources.
-
- Possible values are `image.generation`, `image.edit`, `image.variation` or any
- combination of them.
- """
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py b/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py
deleted file mode 100644
index 438fca8f..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageModerationsParams"]
-
-
-class UsageModerationsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- api_key_ids: List[str]
- """Return only usage for these API keys."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any
- combination of them.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- models: List[str]
- """Return only usage for these models."""
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
-
- user_ids: List[str]
- """Return only usage for these users."""
diff --git a/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py b/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py
deleted file mode 100644
index dc25f126..00000000
--- a/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UsageVectorStoresParams"]
-
-
-class UsageVectorStoresParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- bucket_width: Literal["1m", "1h", "1d"]
- """Width of each time bucket in response.
-
- Currently `1m`, `1h` and `1d` are supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id"]]
- """Group the usage data by the specified fields.
-
- Support fields include `project_id`.
- """
-
- limit: int
- """Specifies the number of buckets to return.
-
- - `bucket_width=1d`: default: 7, max: 31
- - `bucket_width=1h`: default: 24, max: 168
- - `bucket_width=1m`: default: 60, max: 1440
- """
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only usage for these projects."""
diff --git a/src/digitalocean_genai_sdk/types/organization/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/user_delete_response.py
deleted file mode 100644
index 5baab3bf..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["UserDeleteResponse"]
-
-
-class UserDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["organization.user.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/user_list_params.py
deleted file mode 100644
index c7ad6c74..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_list_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import TypedDict
-
-__all__ = ["UserListParams"]
-
-
-class UserListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- emails: List[str]
- """Filter by the email address of users."""
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/user_list_response.py
deleted file mode 100644
index 73aaf45b..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .organization_user import OrganizationUser
-
-__all__ = ["UserListResponse"]
-
-
-class UserListResponse(BaseModel):
- data: List[OrganizationUser]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/organization/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/user_update_params.py
deleted file mode 100644
index bc276120..00000000
--- a/src/digitalocean_genai_sdk/types/organization/user_update_params.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UserUpdateParams"]
-
-
-class UserUpdateParams(TypedDict, total=False):
- role: Required[Literal["owner", "reader"]]
- """`owner` or `reader`"""
diff --git a/src/digitalocean_genai_sdk/types/organization_get_costs_params.py b/src/digitalocean_genai_sdk/types/organization_get_costs_params.py
deleted file mode 100644
index e114aa0f..00000000
--- a/src/digitalocean_genai_sdk/types/organization_get_costs_params.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["OrganizationGetCostsParams"]
-
-
-class OrganizationGetCostsParams(TypedDict, total=False):
- start_time: Required[int]
- """Start time (Unix seconds) of the query time range, inclusive."""
-
- bucket_width: Literal["1d"]
- """Width of each time bucket in response.
-
- Currently only `1d` is supported, default to `1d`.
- """
-
- end_time: int
- """End time (Unix seconds) of the query time range, exclusive."""
-
- group_by: List[Literal["project_id", "line_item"]]
- """Group the costs by the specified fields.
-
- Support fields include `project_id`, `line_item` and any combination of them.
- """
-
- limit: int
- """A limit on the number of buckets to be returned.
-
- Limit can range between 1 and 180, and the default is 7.
- """
-
- page: str
- """A cursor for use in pagination.
-
- Corresponding to the `next_page` field from the previous response.
- """
-
- project_ids: List[str]
- """Return only costs for these projects."""
diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py
deleted file mode 100644
index 36b79e57..00000000
--- a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import TypedDict
-
-from .audit_log_event_type import AuditLogEventType
-
-__all__ = ["OrganizationListAuditLogsParams", "EffectiveAt"]
-
-
-class OrganizationListAuditLogsParams(TypedDict, total=False):
- actor_emails: List[str]
- """Return only events performed by users with these emails."""
-
- actor_ids: List[str]
- """Return only events performed by these actors.
-
- Can be a user ID, a service account ID, or an api key tracking ID.
- """
-
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- effective_at: EffectiveAt
- """Return only events whose `effective_at` (Unix seconds) is in this range."""
-
- event_types: List[AuditLogEventType]
- """Return only events with a `type` in one of these values.
-
- For example, `project.created`. For all options, see the documentation for the
- [audit log object](/docs/api-reference/audit-logs/object).
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- project_ids: List[str]
- """Return only events for these projects."""
-
- resource_ids: List[str]
- """Return only events performed on these targets.
-
- For example, a project ID updated.
- """
-
-
-class EffectiveAt(TypedDict, total=False):
- gt: int
- """
- Return only events whose `effective_at` (Unix seconds) is greater than this
- value.
- """
-
- gte: int
- """
- Return only events whose `effective_at` (Unix seconds) is greater than or equal
- to this value.
- """
-
- lt: int
- """Return only events whose `effective_at` (Unix seconds) is less than this value."""
-
- lte: int
- """
- Return only events whose `effective_at` (Unix seconds) is less than or equal to
- this value.
- """
diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py
deleted file mode 100644
index 751ec527..00000000
--- a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-from .audit_log_actor_user import AuditLogActorUser
-from .audit_log_event_type import AuditLogEventType
-
-__all__ = [
- "OrganizationListAuditLogsResponse",
- "Data",
- "DataActor",
- "DataActorAPIKey",
- "DataActorAPIKeyServiceAccount",
- "DataActorSession",
- "DataAPIKeyCreated",
- "DataAPIKeyCreatedData",
- "DataAPIKeyDeleted",
- "DataAPIKeyUpdated",
- "DataAPIKeyUpdatedChangesRequested",
- "DataInviteAccepted",
- "DataInviteDeleted",
- "DataInviteSent",
- "DataInviteSentData",
- "DataLoginFailed",
- "DataLogoutFailed",
- "DataOrganizationUpdated",
- "DataOrganizationUpdatedChangesRequested",
- "DataOrganizationUpdatedChangesRequestedSettings",
- "DataProject",
- "DataProjectArchived",
- "DataProjectCreated",
- "DataProjectCreatedData",
- "DataProjectUpdated",
- "DataProjectUpdatedChangesRequested",
- "DataRateLimitDeleted",
- "DataRateLimitUpdated",
- "DataRateLimitUpdatedChangesRequested",
- "DataServiceAccountCreated",
- "DataServiceAccountCreatedData",
- "DataServiceAccountDeleted",
- "DataServiceAccountUpdated",
- "DataServiceAccountUpdatedChangesRequested",
- "DataUserAdded",
- "DataUserAddedData",
- "DataUserDeleted",
- "DataUserUpdated",
- "DataUserUpdatedChangesRequested",
-]
-
-
-class DataActorAPIKeyServiceAccount(BaseModel):
- id: Optional[str] = None
- """The service account id."""
-
-
-class DataActorAPIKey(BaseModel):
- id: Optional[str] = None
- """The tracking id of the API key."""
-
- service_account: Optional[DataActorAPIKeyServiceAccount] = None
- """The service account that performed the audit logged action."""
-
- type: Optional[Literal["user", "service_account"]] = None
- """The type of API key. Can be either `user` or `service_account`."""
-
- user: Optional[AuditLogActorUser] = None
- """The user who performed the audit logged action."""
-
-
-class DataActorSession(BaseModel):
- ip_address: Optional[str] = None
- """The IP address from which the action was performed."""
-
- user: Optional[AuditLogActorUser] = None
- """The user who performed the audit logged action."""
-
-
-class DataActor(BaseModel):
- api_key: Optional[DataActorAPIKey] = None
- """The API Key used to perform the audit logged action."""
-
- session: Optional[DataActorSession] = None
- """The session in which the audit logged action was performed."""
-
- type: Optional[Literal["session", "api_key"]] = None
- """The type of actor. Is either `session` or `api_key`."""
-
-
-class DataAPIKeyCreatedData(BaseModel):
- scopes: Optional[List[str]] = None
- """A list of scopes allowed for the API key, e.g. `["api.model.request"]`"""
-
-
-class DataAPIKeyCreated(BaseModel):
- id: Optional[str] = None
- """The tracking ID of the API key."""
-
- data: Optional[DataAPIKeyCreatedData] = None
- """The payload used to create the API key."""
-
-
-class DataAPIKeyDeleted(BaseModel):
- id: Optional[str] = None
- """The tracking ID of the API key."""
-
-
-class DataAPIKeyUpdatedChangesRequested(BaseModel):
- scopes: Optional[List[str]] = None
- """A list of scopes allowed for the API key, e.g. `["api.model.request"]`"""
-
-
-class DataAPIKeyUpdated(BaseModel):
- id: Optional[str] = None
- """The tracking ID of the API key."""
-
- changes_requested: Optional[DataAPIKeyUpdatedChangesRequested] = None
- """The payload used to update the API key."""
-
-
-class DataInviteAccepted(BaseModel):
- id: Optional[str] = None
- """The ID of the invite."""
-
-
-class DataInviteDeleted(BaseModel):
- id: Optional[str] = None
- """The ID of the invite."""
-
-
-class DataInviteSentData(BaseModel):
- email: Optional[str] = None
- """The email invited to the organization."""
-
- role: Optional[str] = None
- """The role the email was invited to be. Is either `owner` or `member`."""
-
-
-class DataInviteSent(BaseModel):
- id: Optional[str] = None
- """The ID of the invite."""
-
- data: Optional[DataInviteSentData] = None
- """The payload used to create the invite."""
-
-
-class DataLoginFailed(BaseModel):
- error_code: Optional[str] = None
- """The error code of the failure."""
-
- error_message: Optional[str] = None
- """The error message of the failure."""
-
-
-class DataLogoutFailed(BaseModel):
- error_code: Optional[str] = None
- """The error code of the failure."""
-
- error_message: Optional[str] = None
- """The error message of the failure."""
-
-
-class DataOrganizationUpdatedChangesRequestedSettings(BaseModel):
- threads_ui_visibility: Optional[str] = None
- """
- Visibility of the threads page which shows messages created with the Assistants
- API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.
- """
-
- usage_dashboard_visibility: Optional[str] = None
- """
- Visibility of the usage dashboard which shows activity and costs for your
- organization. One of `ANY_ROLE` or `OWNERS`.
- """
-
-
-class DataOrganizationUpdatedChangesRequested(BaseModel):
- description: Optional[str] = None
- """The organization description."""
-
- name: Optional[str] = None
- """The organization name."""
-
- settings: Optional[DataOrganizationUpdatedChangesRequestedSettings] = None
-
- title: Optional[str] = None
- """The organization title."""
-
-
-class DataOrganizationUpdated(BaseModel):
- id: Optional[str] = None
- """The organization ID."""
-
- changes_requested: Optional[DataOrganizationUpdatedChangesRequested] = None
- """The payload used to update the organization settings."""
-
-
-class DataProject(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- name: Optional[str] = None
- """The project title."""
-
-
-class DataProjectArchived(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
-
-class DataProjectCreatedData(BaseModel):
- name: Optional[str] = None
- """The project name."""
-
- title: Optional[str] = None
- """The title of the project as seen on the dashboard."""
-
-
-class DataProjectCreated(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- data: Optional[DataProjectCreatedData] = None
- """The payload used to create the project."""
-
-
-class DataProjectUpdatedChangesRequested(BaseModel):
- title: Optional[str] = None
- """The title of the project as seen on the dashboard."""
-
-
-class DataProjectUpdated(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- changes_requested: Optional[DataProjectUpdatedChangesRequested] = None
- """The payload used to update the project."""
-
-
-class DataRateLimitDeleted(BaseModel):
- id: Optional[str] = None
- """The rate limit ID"""
-
-
-class DataRateLimitUpdatedChangesRequested(BaseModel):
- batch_1_day_max_input_tokens: Optional[int] = None
- """The maximum batch input tokens per day. Only relevant for certain models."""
-
- max_audio_megabytes_per_1_minute: Optional[int] = None
- """The maximum audio megabytes per minute. Only relevant for certain models."""
-
- max_images_per_1_minute: Optional[int] = None
- """The maximum images per minute. Only relevant for certain models."""
-
- max_requests_per_1_day: Optional[int] = None
- """The maximum requests per day. Only relevant for certain models."""
-
- max_requests_per_1_minute: Optional[int] = None
- """The maximum requests per minute."""
-
- max_tokens_per_1_minute: Optional[int] = None
- """The maximum tokens per minute."""
-
-
-class DataRateLimitUpdated(BaseModel):
- id: Optional[str] = None
- """The rate limit ID"""
-
- changes_requested: Optional[DataRateLimitUpdatedChangesRequested] = None
- """The payload used to update the rate limits."""
-
-
-class DataServiceAccountCreatedData(BaseModel):
- role: Optional[str] = None
- """The role of the service account. Is either `owner` or `member`."""
-
-
-class DataServiceAccountCreated(BaseModel):
- id: Optional[str] = None
- """The service account ID."""
-
- data: Optional[DataServiceAccountCreatedData] = None
- """The payload used to create the service account."""
-
-
-class DataServiceAccountDeleted(BaseModel):
- id: Optional[str] = None
- """The service account ID."""
-
-
-class DataServiceAccountUpdatedChangesRequested(BaseModel):
- role: Optional[str] = None
- """The role of the service account. Is either `owner` or `member`."""
-
-
-class DataServiceAccountUpdated(BaseModel):
- id: Optional[str] = None
- """The service account ID."""
-
- changes_requested: Optional[DataServiceAccountUpdatedChangesRequested] = None
- """The payload used to updated the service account."""
-
-
-class DataUserAddedData(BaseModel):
- role: Optional[str] = None
- """The role of the user. Is either `owner` or `member`."""
-
-
-class DataUserAdded(BaseModel):
- id: Optional[str] = None
- """The user ID."""
-
- data: Optional[DataUserAddedData] = None
- """The payload used to add the user to the project."""
-
-
-class DataUserDeleted(BaseModel):
- id: Optional[str] = None
- """The user ID."""
-
-
-class DataUserUpdatedChangesRequested(BaseModel):
- role: Optional[str] = None
- """The role of the user. Is either `owner` or `member`."""
-
-
-class DataUserUpdated(BaseModel):
- id: Optional[str] = None
- """The project ID."""
-
- changes_requested: Optional[DataUserUpdatedChangesRequested] = None
- """The payload used to update the user."""
-
-
-class Data(BaseModel):
- id: str
- """The ID of this log."""
-
- actor: DataActor
- """The actor who performed the audit logged action."""
-
- effective_at: int
- """The Unix timestamp (in seconds) of the event."""
-
- type: AuditLogEventType
- """The event type."""
-
- api_key_created: Optional[DataAPIKeyCreated] = FieldInfo(alias="api_key.created", default=None)
- """The details for events with this `type`."""
-
- api_key_deleted: Optional[DataAPIKeyDeleted] = FieldInfo(alias="api_key.deleted", default=None)
- """The details for events with this `type`."""
-
- api_key_updated: Optional[DataAPIKeyUpdated] = FieldInfo(alias="api_key.updated", default=None)
- """The details for events with this `type`."""
-
- invite_accepted: Optional[DataInviteAccepted] = FieldInfo(alias="invite.accepted", default=None)
- """The details for events with this `type`."""
-
- invite_deleted: Optional[DataInviteDeleted] = FieldInfo(alias="invite.deleted", default=None)
- """The details for events with this `type`."""
-
- invite_sent: Optional[DataInviteSent] = FieldInfo(alias="invite.sent", default=None)
- """The details for events with this `type`."""
-
- login_failed: Optional[DataLoginFailed] = FieldInfo(alias="login.failed", default=None)
- """The details for events with this `type`."""
-
- logout_failed: Optional[DataLogoutFailed] = FieldInfo(alias="logout.failed", default=None)
- """The details for events with this `type`."""
-
- organization_updated: Optional[DataOrganizationUpdated] = FieldInfo(alias="organization.updated", default=None)
- """The details for events with this `type`."""
-
- project: Optional[DataProject] = None
- """The project that the action was scoped to.
-
- Absent for actions not scoped to projects.
- """
-
- project_archived: Optional[DataProjectArchived] = FieldInfo(alias="project.archived", default=None)
- """The details for events with this `type`."""
-
- project_created: Optional[DataProjectCreated] = FieldInfo(alias="project.created", default=None)
- """The details for events with this `type`."""
-
- project_updated: Optional[DataProjectUpdated] = FieldInfo(alias="project.updated", default=None)
- """The details for events with this `type`."""
-
- rate_limit_deleted: Optional[DataRateLimitDeleted] = FieldInfo(alias="rate_limit.deleted", default=None)
- """The details for events with this `type`."""
-
- rate_limit_updated: Optional[DataRateLimitUpdated] = FieldInfo(alias="rate_limit.updated", default=None)
- """The details for events with this `type`."""
-
- service_account_created: Optional[DataServiceAccountCreated] = FieldInfo(
- alias="service_account.created", default=None
- )
- """The details for events with this `type`."""
-
- service_account_deleted: Optional[DataServiceAccountDeleted] = FieldInfo(
- alias="service_account.deleted", default=None
- )
- """The details for events with this `type`."""
-
- service_account_updated: Optional[DataServiceAccountUpdated] = FieldInfo(
- alias="service_account.updated", default=None
- )
- """The details for events with this `type`."""
-
- user_added: Optional[DataUserAdded] = FieldInfo(alias="user.added", default=None)
- """The details for events with this `type`."""
-
- user_deleted: Optional[DataUserDeleted] = FieldInfo(alias="user.deleted", default=None)
- """The details for events with this `type`."""
-
- user_updated: Optional[DataUserUpdated] = FieldInfo(alias="user.updated", default=None)
- """The details for events with this `type`."""
-
-
-class OrganizationListAuditLogsResponse(BaseModel):
- data: List[Data]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: Literal["list"]
diff --git a/src/digitalocean_genai_sdk/types/output_message.py b/src/digitalocean_genai_sdk/types/output_message.py
deleted file mode 100644
index 4db6e72e..00000000
--- a/src/digitalocean_genai_sdk/types/output_message.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-
-__all__ = [
- "OutputMessage",
- "Content",
- "ContentOutputText",
- "ContentOutputTextAnnotation",
- "ContentOutputTextAnnotationFileCitation",
- "ContentOutputTextAnnotationURLCitation",
- "ContentOutputTextAnnotationFilePath",
- "ContentRefusal",
-]
-
-
-class ContentOutputTextAnnotationFileCitation(BaseModel):
- file_id: str
- """The ID of the file."""
-
- index: int
- """The index of the file in the list of files."""
-
- type: Literal["file_citation"]
- """The type of the file citation. Always `file_citation`."""
-
-
-class ContentOutputTextAnnotationURLCitation(BaseModel):
- end_index: int
- """The index of the last character of the URL citation in the message."""
-
- start_index: int
- """The index of the first character of the URL citation in the message."""
-
- title: str
- """The title of the web resource."""
-
- type: Literal["url_citation"]
- """The type of the URL citation. Always `url_citation`."""
-
- url: str
- """The URL of the web resource."""
-
-
-class ContentOutputTextAnnotationFilePath(BaseModel):
- file_id: str
- """The ID of the file."""
-
- index: int
- """The index of the file in the list of files."""
-
- type: Literal["file_path"]
- """The type of the file path. Always `file_path`."""
-
-
-ContentOutputTextAnnotation: TypeAlias = Union[
- ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath
-]
-
-
-class ContentOutputText(BaseModel):
- annotations: List[ContentOutputTextAnnotation]
- """The annotations of the text output."""
-
- text: str
- """The text output from the model."""
-
- type: Literal["output_text"]
- """The type of the output text. Always `output_text`."""
-
-
-class ContentRefusal(BaseModel):
- refusal: str
- """The refusal explanationfrom the model."""
-
- type: Literal["refusal"]
- """The type of the refusal. Always `refusal`."""
-
-
-Content: TypeAlias = Union[ContentOutputText, ContentRefusal]
-
-
-class OutputMessage(BaseModel):
- id: str
- """The unique ID of the output message."""
-
- content: List[Content]
- """The content of the output message."""
-
- role: Literal["assistant"]
- """The role of the output message. Always `assistant`."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
-
- type: Literal["message"]
- """The type of the output message. Always `message`."""
diff --git a/src/digitalocean_genai_sdk/types/output_message_param.py b/src/digitalocean_genai_sdk/types/output_message_param.py
deleted file mode 100644
index 83f13e18..00000000
--- a/src/digitalocean_genai_sdk/types/output_message_param.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = [
- "OutputMessageParam",
- "Content",
- "ContentOutputText",
- "ContentOutputTextAnnotation",
- "ContentOutputTextAnnotationFileCitation",
- "ContentOutputTextAnnotationURLCitation",
- "ContentOutputTextAnnotationFilePath",
- "ContentRefusal",
-]
-
-
-class ContentOutputTextAnnotationFileCitation(TypedDict, total=False):
- file_id: Required[str]
- """The ID of the file."""
-
- index: Required[int]
- """The index of the file in the list of files."""
-
- type: Required[Literal["file_citation"]]
- """The type of the file citation. Always `file_citation`."""
-
-
-class ContentOutputTextAnnotationURLCitation(TypedDict, total=False):
- end_index: Required[int]
- """The index of the last character of the URL citation in the message."""
-
- start_index: Required[int]
- """The index of the first character of the URL citation in the message."""
-
- title: Required[str]
- """The title of the web resource."""
-
- type: Required[Literal["url_citation"]]
- """The type of the URL citation. Always `url_citation`."""
-
- url: Required[str]
- """The URL of the web resource."""
-
-
-class ContentOutputTextAnnotationFilePath(TypedDict, total=False):
- file_id: Required[str]
- """The ID of the file."""
-
- index: Required[int]
- """The index of the file in the list of files."""
-
- type: Required[Literal["file_path"]]
- """The type of the file path. Always `file_path`."""
-
-
-ContentOutputTextAnnotation: TypeAlias = Union[
- ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath
-]
-
-
-class ContentOutputText(TypedDict, total=False):
- annotations: Required[Iterable[ContentOutputTextAnnotation]]
- """The annotations of the text output."""
-
- text: Required[str]
- """The text output from the model."""
-
- type: Required[Literal["output_text"]]
- """The type of the output text. Always `output_text`."""
-
-
-class ContentRefusal(TypedDict, total=False):
- refusal: Required[str]
- """The refusal explanationfrom the model."""
-
- type: Required[Literal["refusal"]]
- """The type of the refusal. Always `refusal`."""
-
-
-Content: TypeAlias = Union[ContentOutputText, ContentRefusal]
-
-
-class OutputMessageParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the output message."""
-
- content: Required[Iterable[Content]]
- """The content of the output message."""
-
- role: Required[Literal["assistant"]]
- """The role of the output message. Always `assistant`."""
-
- status: Required[Literal["in_progress", "completed", "incomplete"]]
- """The status of the message input.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when input items
- are returned via API.
- """
-
- type: Required[Literal["message"]]
- """The type of the output message. Always `message`."""
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_session_params.py
deleted file mode 100644
index df105bac..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_session_params.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable
-from typing_extensions import Literal, TypedDict
-
-from .voice_ids_shared_param import VoiceIDsSharedParam
-
-__all__ = [
- "RealtimeCreateSessionParams",
- "InputAudioNoiseReduction",
- "InputAudioTranscription",
- "Tool",
- "TurnDetection",
-]
-
-
-class RealtimeCreateSessionParams(TypedDict, total=False):
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
- """The format of input audio.
-
- Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
- be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
- byte order.
- """
-
- input_audio_noise_reduction: InputAudioNoiseReduction
- """Configuration for input audio noise reduction.
-
- This can be set to `null` to turn off. Noise reduction filters audio added to
- the input audio buffer before it is sent to VAD and the model. Filtering the
- audio can improve VAD and turn detection accuracy (reducing false positives) and
- model performance by improving perception of the input audio.
- """
-
- input_audio_transcription: InputAudioTranscription
- """
- Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through
- [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
- and should be treated as guidance of input audio content rather than precisely
- what the model heard. The client can optionally set the language and prompt for
- transcription, these offer additional guidance to the transcription service.
- """
-
- instructions: str
- """The default system instructions (i.e.
-
- system message) prepended to model calls. This field allows the client to guide
- the model on desired responses. The model can be instructed on response content
- and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
- good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
- into your voice", "laugh frequently"). The instructions are not guaranteed to be
- followed by the model, but they provide guidance to the model on the desired
- behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
- """
-
- max_response_output_tokens: Union[int, Literal["inf"]]
- """
- Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
- """
-
- modalities: List[Literal["text", "audio"]]
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- model: Literal[
- "gpt-4o-realtime-preview",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- ]
- """The Realtime model used for this session."""
-
- output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
- """The format of output audio.
-
- Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
- sampled at a rate of 24kHz.
- """
-
- temperature: float
- """Sampling temperature for the model, limited to [0.6, 1.2].
-
- For audio models a temperature of 0.8 is highly recommended for best
- performance.
- """
-
- tool_choice: str
- """How the model chooses tools.
-
- Options are `auto`, `none`, `required`, or specify a function.
- """
-
- tools: Iterable[Tool]
- """Tools (functions) available to the model."""
-
- turn_detection: TurnDetection
- """Configuration for turn detection, ether Server VAD or Semantic VAD.
-
- This can be set to `null` to turn off, in which case the client must manually
- trigger model response. Server VAD means that the model will detect the start
- and end of speech based on audio volume and respond at the end of user speech.
- Semantic VAD is more advanced and uses a turn detection model (in conjuction
- with VAD) to semantically estimate whether the user has finished speaking, then
- dynamically sets a timeout based on this probability. For example, if user audio
- trails off with "uhhm", the model will score a low probability of turn end and
- wait longer for the user to continue speaking. This can be useful for more
- natural conversations, but may have a higher latency.
- """
-
- voice: VoiceIDsSharedParam
- """The voice the model uses to respond.
-
- Voice cannot be changed during the session once the model has responded with
- audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
- `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
- """
-
-
-class InputAudioNoiseReduction(TypedDict, total=False):
- type: Literal["near_field", "far_field"]
- """Type of noise reduction.
-
- `near_field` is for close-talking microphones such as headphones, `far_field` is
- for far-field microphones such as laptop or conference room microphones.
- """
-
-
-class InputAudioTranscription(TypedDict, total=False):
- language: str
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- model: str
- """
- The model to use for transcription, current options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1`.
- """
-
- prompt: str
- """
- An optional text to guide the model's style or continue a previous audio
- segment. For `whisper-1`, the
- [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For
- `gpt-4o-transcribe` models, the prompt is a free text string, for example
- "expect words related to technology".
- """
-
-
-class Tool(TypedDict, total=False):
- description: str
- """
- The description of the function, including guidance on when and how to call it,
- and guidance about what to tell the user when calling (if anything).
- """
-
- name: str
- """The name of the function."""
-
- parameters: object
- """Parameters of the function in JSON Schema."""
-
- type: Literal["function"]
- """The type of the tool, i.e. `function`."""
-
-
-class TurnDetection(TypedDict, total=False):
- create_response: bool
- """
- Whether or not to automatically generate a response when a VAD stop event
- occurs.
- """
-
- eagerness: Literal["low", "medium", "high", "auto"]
- """Used only for `semantic_vad` mode.
-
- The eagerness of the model to respond. `low` will wait longer for the user to
- continue speaking, `high` will respond more quickly. `auto` is the default and
- is equivalent to `medium`.
- """
-
- interrupt_response: bool
- """
- Whether or not to automatically interrupt any ongoing response with output to
- the default conversation (i.e. `conversation` of `auto`) when a VAD start event
- occurs.
- """
-
- prefix_padding_ms: int
- """Used only for `server_vad` mode.
-
- Amount of audio to include before the VAD detected speech (in milliseconds).
- Defaults to 300ms.
- """
-
- silence_duration_ms: int
- """Used only for `server_vad` mode.
-
- Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
- With shorter values the model will respond more quickly, but may jump in on
- short pauses from the user.
- """
-
- threshold: float
- """Used only for `server_vad` mode.
-
- Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
- threshold will require louder audio to activate the model, and thus might
- perform better in noisy environments.
- """
-
- type: Literal["server_vad", "semantic_vad"]
- """Type of turn detection."""
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_session_response.py
deleted file mode 100644
index 1b7bc03c..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_session_response.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .voice_ids_shared import VoiceIDsShared
-
-__all__ = ["RealtimeCreateSessionResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"]
-
-
-class ClientSecret(BaseModel):
- expires_at: int
- """Timestamp for when the token expires.
-
- Currently, all tokens expire after one minute.
- """
-
- value: str
- """
- Ephemeral key usable in client environments to authenticate connections to the
- Realtime API. Use this in client-side environments rather than a standard API
- token, which should only be used server-side.
- """
-
-
-class InputAudioTranscription(BaseModel):
- model: Optional[str] = None
- """
- The model to use for transcription, `whisper-1` is the only currently supported
- model.
- """
-
-
-class Tool(BaseModel):
- description: Optional[str] = None
- """
- The description of the function, including guidance on when and how to call it,
- and guidance about what to tell the user when calling (if anything).
- """
-
- name: Optional[str] = None
- """The name of the function."""
-
- parameters: Optional[object] = None
- """Parameters of the function in JSON Schema."""
-
- type: Optional[Literal["function"]] = None
- """The type of the tool, i.e. `function`."""
-
-
-class TurnDetection(BaseModel):
- prefix_padding_ms: Optional[int] = None
- """Amount of audio to include before the VAD detected speech (in milliseconds).
-
- Defaults to 300ms.
- """
-
- silence_duration_ms: Optional[int] = None
- """Duration of silence to detect speech stop (in milliseconds).
-
- Defaults to 500ms. With shorter values the model will respond more quickly, but
- may jump in on short pauses from the user.
- """
-
- threshold: Optional[float] = None
- """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
-
- A higher threshold will require louder audio to activate the model, and thus
- might perform better in noisy environments.
- """
-
- type: Optional[str] = None
- """Type of turn detection, only `server_vad` is currently supported."""
-
-
-class RealtimeCreateSessionResponse(BaseModel):
- client_secret: ClientSecret
- """Ephemeral key returned by the API."""
-
- input_audio_format: Optional[str] = None
- """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
-
- input_audio_transcription: Optional[InputAudioTranscription] = None
- """
- Configuration for input audio transcription, defaults to off and can be set to
- `null` to turn off once on. Input audio transcription is not native to the
- model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
- """
-
- instructions: Optional[str] = None
- """The default system instructions (i.e.
-
- system message) prepended to model calls. This field allows the client to guide
- the model on desired responses. The model can be instructed on response content
- and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
- good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
- into your voice", "laugh frequently"). The instructions are not guaranteed to be
- followed by the model, but they provide guidance to the model on the desired
- behavior.
-
- Note that the server sets default instructions which will be used if this field
- is not set and are visible in the `session.created` event at the start of the
- session.
- """
-
- max_response_output_tokens: Union[int, Literal["inf"], None] = None
- """
- Maximum number of output tokens for a single assistant response, inclusive of
- tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
- `inf` for the maximum available tokens for a given model. Defaults to `inf`.
- """
-
- modalities: Optional[List[Literal["text", "audio"]]] = None
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- output_audio_format: Optional[str] = None
- """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
-
- temperature: Optional[float] = None
- """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
-
- tool_choice: Optional[str] = None
- """How the model chooses tools.
-
- Options are `auto`, `none`, `required`, or specify a function.
- """
-
- tools: Optional[List[Tool]] = None
- """Tools (functions) available to the model."""
-
- turn_detection: Optional[TurnDetection] = None
- """Configuration for turn detection.
-
- Can be set to `null` to turn off. Server VAD means that the model will detect
- the start and end of speech based on audio volume and respond at the end of user
- speech.
- """
-
- voice: Optional[VoiceIDsShared] = None
- """The voice the model uses to respond.
-
- Voice cannot be changed during the session once the model has responded with
- audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
- `coral`, `echo` `sage`, `shimmer` and `verse`.
- """
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py
deleted file mode 100644
index 21912679..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, TypedDict
-
-__all__ = [
- "RealtimeCreateTranscriptionSessionParams",
- "InputAudioNoiseReduction",
- "InputAudioTranscription",
- "TurnDetection",
-]
-
-
-class RealtimeCreateTranscriptionSessionParams(TypedDict, total=False):
- include: List[str]
- """The set of items to include in the transcription. Current available items are:
-
- - `item.input_audio_transcription.logprobs`
- """
-
- input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
- """The format of input audio.
-
- Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
- be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
- byte order.
- """
-
- input_audio_noise_reduction: InputAudioNoiseReduction
- """Configuration for input audio noise reduction.
-
- This can be set to `null` to turn off. Noise reduction filters audio added to
- the input audio buffer before it is sent to VAD and the model. Filtering the
- audio can improve VAD and turn detection accuracy (reducing false positives) and
- model performance by improving perception of the input audio.
- """
-
- input_audio_transcription: InputAudioTranscription
- """Configuration for input audio transcription.
-
- The client can optionally set the language and prompt for transcription, these
- offer additional guidance to the transcription service.
- """
-
- modalities: List[Literal["text", "audio"]]
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- turn_detection: TurnDetection
- """Configuration for turn detection, ether Server VAD or Semantic VAD.
-
- This can be set to `null` to turn off, in which case the client must manually
- trigger model response. Server VAD means that the model will detect the start
- and end of speech based on audio volume and respond at the end of user speech.
- Semantic VAD is more advanced and uses a turn detection model (in conjuction
- with VAD) to semantically estimate whether the user has finished speaking, then
- dynamically sets a timeout based on this probability. For example, if user audio
- trails off with "uhhm", the model will score a low probability of turn end and
- wait longer for the user to continue speaking. This can be useful for more
- natural conversations, but may have a higher latency.
- """
-
-
-class InputAudioNoiseReduction(TypedDict, total=False):
- type: Literal["near_field", "far_field"]
- """Type of noise reduction.
-
- `near_field` is for close-talking microphones such as headphones, `far_field` is
- for far-field microphones such as laptop or conference room microphones.
- """
-
-
-class InputAudioTranscription(TypedDict, total=False):
- language: str
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]
- """
- The model to use for transcription, current options are `gpt-4o-transcribe`,
- `gpt-4o-mini-transcribe`, and `whisper-1`.
- """
-
- prompt: str
- """
- An optional text to guide the model's style or continue a previous audio
- segment. For `whisper-1`, the
- [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For
- `gpt-4o-transcribe` models, the prompt is a free text string, for example
- "expect words related to technology".
- """
-
-
-class TurnDetection(TypedDict, total=False):
- create_response: bool
- """Whether or not to automatically generate a response when a VAD stop event
- occurs.
-
- Not available for transcription sessions.
- """
-
- eagerness: Literal["low", "medium", "high", "auto"]
- """Used only for `semantic_vad` mode.
-
- The eagerness of the model to respond. `low` will wait longer for the user to
- continue speaking, `high` will respond more quickly. `auto` is the default and
- is equivalent to `medium`.
- """
-
- interrupt_response: bool
- """
- Whether or not to automatically interrupt any ongoing response with output to
- the default conversation (i.e. `conversation` of `auto`) when a VAD start event
- occurs. Not available for transcription sessions.
- """
-
- prefix_padding_ms: int
- """Used only for `server_vad` mode.
-
- Amount of audio to include before the VAD detected speech (in milliseconds).
- Defaults to 300ms.
- """
-
- silence_duration_ms: int
- """Used only for `server_vad` mode.
-
- Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
- With shorter values the model will respond more quickly, but may jump in on
- short pauses from the user.
- """
-
- threshold: float
- """Used only for `server_vad` mode.
-
- Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
- threshold will require louder audio to activate the model, and thus might
- perform better in noisy environments.
- """
-
- type: Literal["server_vad", "semantic_vad"]
- """Type of turn detection."""
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py
deleted file mode 100644
index bbd0b9de..00000000
--- a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["RealtimeCreateTranscriptionSessionResponse", "ClientSecret", "InputAudioTranscription", "TurnDetection"]
-
-
-class ClientSecret(BaseModel):
- expires_at: int
- """Timestamp for when the token expires.
-
- Currently, all tokens expire after one minute.
- """
-
- value: str
- """
- Ephemeral key usable in client environments to authenticate connections to the
- Realtime API. Use this in client-side environments rather than a standard API
- token, which should only be used server-side.
- """
-
-
-class InputAudioTranscription(BaseModel):
- language: Optional[str] = None
- """The language of the input audio.
-
- Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
- format will improve accuracy and latency.
- """
-
- model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None
- """The model to use for transcription.
-
- Can be `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, or `whisper-1`.
- """
-
- prompt: Optional[str] = None
- """An optional text to guide the model's style or continue a previous audio
- segment.
-
- The [prompt](/docs/guides/speech-to-text#prompting) should match the audio
- language.
- """
-
-
-class TurnDetection(BaseModel):
- prefix_padding_ms: Optional[int] = None
- """Amount of audio to include before the VAD detected speech (in milliseconds).
-
- Defaults to 300ms.
- """
-
- silence_duration_ms: Optional[int] = None
- """Duration of silence to detect speech stop (in milliseconds).
-
- Defaults to 500ms. With shorter values the model will respond more quickly, but
- may jump in on short pauses from the user.
- """
-
- threshold: Optional[float] = None
- """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
-
- A higher threshold will require louder audio to activate the model, and thus
- might perform better in noisy environments.
- """
-
- type: Optional[str] = None
- """Type of turn detection, only `server_vad` is currently supported."""
-
-
-class RealtimeCreateTranscriptionSessionResponse(BaseModel):
- client_secret: ClientSecret
- """Ephemeral key returned by the API.
-
- Only present when the session is created on the server via REST API.
- """
-
- input_audio_format: Optional[str] = None
- """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
-
- input_audio_transcription: Optional[InputAudioTranscription] = None
- """Configuration of the transcription model."""
-
- modalities: Optional[List[Literal["text", "audio"]]] = None
- """The set of modalities the model can respond with.
-
- To disable audio, set this to ["text"].
- """
-
- turn_detection: Optional[TurnDetection] = None
- """Configuration for turn detection.
-
- Can be set to `null` to turn off. Server VAD means that the model will detect
- the start and end of speech based on audio volume and respond at the end of user
- speech.
- """
diff --git a/src/digitalocean_genai_sdk/types/reasoning_effort.py b/src/digitalocean_genai_sdk/types/reasoning_effort.py
deleted file mode 100644
index ace21b67..00000000
--- a/src/digitalocean_genai_sdk/types/reasoning_effort.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["ReasoningEffort"]
-
-ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
diff --git a/src/digitalocean_genai_sdk/types/reasoning_item.py b/src/digitalocean_genai_sdk/types/reasoning_item.py
deleted file mode 100644
index 28a64183..00000000
--- a/src/digitalocean_genai_sdk/types/reasoning_item.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ReasoningItem", "Summary"]
-
-
-class Summary(BaseModel):
- text: str
- """
- A short summary of the reasoning used by the model when generating the response.
- """
-
- type: Literal["summary_text"]
- """The type of the object. Always `summary_text`."""
-
-
-class ReasoningItem(BaseModel):
- id: str
- """The unique identifier of the reasoning content."""
-
- summary: List[Summary]
- """Reasoning text contents."""
-
- type: Literal["reasoning"]
- """The type of the object. Always `reasoning`."""
-
- status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/reasoning_item_param.py b/src/digitalocean_genai_sdk/types/reasoning_item_param.py
deleted file mode 100644
index 4d2a0504..00000000
--- a/src/digitalocean_genai_sdk/types/reasoning_item_param.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["ReasoningItemParam", "Summary"]
-
-
-class Summary(TypedDict, total=False):
- text: Required[str]
- """
- A short summary of the reasoning used by the model when generating the response.
- """
-
- type: Required[Literal["summary_text"]]
- """The type of the object. Always `summary_text`."""
-
-
-class ReasoningItemParam(TypedDict, total=False):
- id: Required[str]
- """The unique identifier of the reasoning content."""
-
- summary: Required[Iterable[Summary]]
- """Reasoning text contents."""
-
- type: Required[Literal["reasoning"]]
- """The type of the object. Always `reasoning`."""
-
- status: Literal["in_progress", "completed", "incomplete"]
- """The status of the item.
-
- One of `in_progress`, `completed`, or `incomplete`. Populated when items are
- returned via API.
- """
diff --git a/src/digitalocean_genai_sdk/types/response.py b/src/digitalocean_genai_sdk/types/response.py
deleted file mode 100644
index 523eedfc..00000000
--- a/src/digitalocean_genai_sdk/types/response.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-from .output_message import OutputMessage
-from .reasoning_item import ReasoningItem
-from .computer_tool_call import ComputerToolCall
-from .function_tool_call import FunctionToolCall
-from .response_properties import ResponseProperties
-from .web_search_tool_call import WebSearchToolCall
-from .file_search_tool_call import FileSearchToolCall
-from .model_response_properties import ModelResponseProperties
-
-__all__ = [
- "Response",
- "ResponseError",
- "ResponseIncompleteDetails",
- "ResponseOutput",
- "ResponseUsage",
- "ResponseUsageInputTokensDetails",
- "ResponseUsageOutputTokensDetails",
-]
-
-
-class ResponseError(BaseModel):
- code: Literal[
- "server_error",
- "rate_limit_exceeded",
- "invalid_prompt",
- "vector_store_timeout",
- "invalid_image",
- "invalid_image_format",
- "invalid_base64_image",
- "invalid_image_url",
- "image_too_large",
- "image_too_small",
- "image_parse_error",
- "image_content_policy_violation",
- "invalid_image_mode",
- "image_file_too_large",
- "unsupported_image_media_type",
- "empty_image_file",
- "failed_to_download_image",
- "image_file_not_found",
- ]
- """The error code for the response."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class ResponseIncompleteDetails(BaseModel):
- reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
- """The reason why the response is incomplete."""
-
-
-ResponseOutput: TypeAlias = Annotated[
- Union[OutputMessage, FileSearchToolCall, FunctionToolCall, WebSearchToolCall, ComputerToolCall, ReasoningItem],
- PropertyInfo(discriminator="type"),
-]
-
-
-class ResponseUsageInputTokensDetails(BaseModel):
- cached_tokens: int
- """The number of tokens that were retrieved from the cache.
-
- [More on prompt caching](/docs/guides/prompt-caching).
- """
-
-
-class ResponseUsageOutputTokensDetails(BaseModel):
- reasoning_tokens: int
- """The number of reasoning tokens."""
-
-
-class ResponseUsage(BaseModel):
- input_tokens: int
- """The number of input tokens."""
-
- input_tokens_details: ResponseUsageInputTokensDetails
- """A detailed breakdown of the input tokens."""
-
- output_tokens: int
- """The number of output tokens."""
-
- output_tokens_details: ResponseUsageOutputTokensDetails
- """A detailed breakdown of the output tokens."""
-
- total_tokens: int
- """The total number of tokens used."""
-
-
-class Response(ModelResponseProperties, ResponseProperties):
- id: str
- """Unique identifier for this Response."""
-
- created_at: float
- """Unix timestamp (in seconds) of when this Response was created."""
-
- error: Optional[ResponseError] = None
- """An error object returned when the model fails to generate a Response."""
-
- incomplete_details: Optional[ResponseIncompleteDetails] = None
- """Details about why the response is incomplete."""
-
- object: Literal["response"]
- """The object type of this resource - always set to `response`."""
-
- output: List[ResponseOutput]
- """An array of content items generated by the model.
-
- - The length and order of items in the `output` array is dependent on the
- model's response.
- - Rather than accessing the first item in the `output` array and assuming it's
- an `assistant` message with the content generated by the model, you might
- consider using the `output_text` property where supported in SDKs.
- """
-
- parallel_tool_calls: bool
- """Whether to allow the model to run tool calls in parallel."""
-
- output_text: Optional[str] = None
- """
- SDK-only convenience property that contains the aggregated text output from all
- `output_text` items in the `output` array, if any are present. Supported in the
- Python and JavaScript SDKs.
- """
-
- status: Optional[Literal["completed", "failed", "in_progress", "incomplete"]] = None
- """The status of the response generation.
-
- One of `completed`, `failed`, `in_progress`, or `incomplete`.
- """
-
- usage: Optional[ResponseUsage] = None
- """
- Represents token usage details including input tokens, output tokens, a
- breakdown of output tokens, and the total tokens used.
- """
diff --git a/src/digitalocean_genai_sdk/types/response_create_params.py b/src/digitalocean_genai_sdk/types/response_create_params.py
deleted file mode 100644
index 878e53a5..00000000
--- a/src/digitalocean_genai_sdk/types/response_create_params.py
+++ /dev/null
@@ -1,494 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .includable import Includable
-from .reasoning_effort import ReasoningEffort
-from .input_content_param import InputContentParam
-from .input_message_param import InputMessageParam
-from .output_message_param import OutputMessageParam
-from .reasoning_item_param import ReasoningItemParam
-from .compound_filter_param import CompoundFilterParam
-from .comparison_filter_param import ComparisonFilterParam
-from .computer_tool_call_param import ComputerToolCallParam
-from .function_tool_call_param import FunctionToolCallParam
-from .web_search_tool_call_param import WebSearchToolCallParam
-from .file_search_tool_call_param import FileSearchToolCallParam
-from .chat.web_search_context_size import WebSearchContextSize
-from .chat.web_search_location_param import WebSearchLocationParam
-from .chat.response_format_text_param import ResponseFormatTextParam
-from .computer_tool_call_output_param import ComputerToolCallOutputParam
-from .function_tool_call_output_param import FunctionToolCallOutputParam
-from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam
-
-__all__ = [
- "ResponseCreateParams",
- "InputInputItemList",
- "InputInputItemListMessage",
- "InputInputItemListItemReference",
- "Reasoning",
- "Text",
- "TextFormat",
- "TextFormatTextResponseFormatJsonSchema",
- "ToolChoice",
- "ToolChoiceToolChoiceTypes",
- "ToolChoiceToolChoiceFunction",
- "Tool",
- "ToolFileSearchTool",
- "ToolFileSearchToolFilters",
- "ToolFileSearchToolRankingOptions",
- "ToolFunctionTool",
- "ToolComputerTool",
- "ToolWebSearchTool",
- "ToolWebSearchToolUserLocation",
-]
-
-
-class ResponseCreateParams(TypedDict, total=False):
- input: Required[Union[str, Iterable[InputInputItemList]]]
- """Text, image, or file inputs to the model, used to generate a response.
-
- Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Image inputs](/docs/guides/images)
- - [File inputs](/docs/guides/pdf-files)
- - [Conversation state](/docs/guides/conversation-state)
- - [Function calling](/docs/guides/function-calling)
- """
-
- model: Required[
- Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- ]
- ]
- """Model ID used to generate the response, like `gpt-4o` or `o1`.
-
- OpenAI offers a wide range of models with different capabilities, performance
- characteristics, and price points. Refer to the [model guide](/docs/models) to
- browse and compare available models.
- """
-
- include: Optional[List[Includable]]
- """Specify additional output data to include in the model response.
-
- Currently supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
- """
-
- instructions: Optional[str]
- """
- Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
- """
-
- max_output_tokens: Optional[int]
- """
- An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- parallel_tool_calls: Optional[bool]
- """Whether to allow the model to run tool calls in parallel."""
-
- previous_response_id: Optional[str]
- """The unique ID of the previous response to the model.
-
- Use this to create multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
- """
-
- reasoning: Optional[Reasoning]
- """**o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- """
-
- store: Optional[bool]
- """Whether to store the generated model response for later retrieval via API."""
-
- stream: Optional[bool]
- """
- If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/responses-streaming) for
- more information.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic. We generally recommend altering
- this or `top_p` but not both.
- """
-
- text: Text
- """Configuration options for a text response from the model.
-
- Can be plain text or structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
- """
-
- tool_choice: ToolChoice
- """
- How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
- """
-
- tools: Iterable[Tool]
- """An array of tools the model may call while generating a response.
-
- You can specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
-
- truncation: Optional[Literal["auto", "disabled"]]
- """The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
- """
-
- user: str
- """
- A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
- """
-
-
-class InputInputItemListMessage(TypedDict, total=False):
- content: Required[Union[str, Iterable[InputContentParam]]]
- """
- Text, image, or audio input to the model, used to generate a response. Can also
- contain previous assistant responses.
- """
-
- role: Required[Literal["user", "assistant", "system", "developer"]]
- """The role of the message input.
-
- One of `user`, `assistant`, `system`, or `developer`.
- """
-
- type: Literal["message"]
- """The type of the message input. Always `message`."""
-
-
-class InputInputItemListItemReference(TypedDict, total=False):
- id: Required[str]
- """The ID of the item to reference."""
-
- type: Required[Literal["item_reference"]]
- """The type of item to reference. Always `item_reference`."""
-
-
-InputInputItemList: TypeAlias = Union[
- InputInputItemListMessage,
- InputMessageParam,
- OutputMessageParam,
- FileSearchToolCallParam,
- ComputerToolCallParam,
- ComputerToolCallOutputParam,
- WebSearchToolCallParam,
- FunctionToolCallParam,
- FunctionToolCallOutputParam,
- ReasoningItemParam,
- InputInputItemListItemReference,
-]
-
-
-class Reasoning(TypedDict, total=False):
- effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- generate_summary: Optional[Literal["concise", "detailed"]]
- """**computer_use_preview only**
-
- A summary of the reasoning performed by the model. This can be useful for
- debugging and understanding the model's reasoning process. One of `concise` or
- `detailed`.
- """
-
-
-class TextFormatTextResponseFormatJsonSchema(TypedDict, total=False):
- schema: Required[Dict[str, object]]
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- type: Required[Literal["json_schema"]]
- """The type of response format being defined. Always `json_schema`."""
-
- description: str
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- name: str
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- strict: Optional[bool]
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-TextFormat: TypeAlias = Union[
- ResponseFormatTextParam, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObjectParam
-]
-
-
-class Text(TypedDict, total=False):
- format: TextFormat
- """An object specifying the format that the model must output.
-
- Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
- ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](/docs/guides/structured-outputs).
-
- The default format is `{ "type": "text" }` with no additional options.
-
- **Not recommended for gpt-4o and newer models:**
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
-
-class ToolChoiceToolChoiceTypes(TypedDict, total=False):
- type: Required[
- Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
- ]
- """The type of hosted tool the model should to use.
-
- Learn more about [built-in tools](/docs/guides/tools).
-
- Allowed values are:
-
- - `file_search`
- - `web_search_preview`
- - `computer_use_preview`
- """
-
-
-class ToolChoiceToolChoiceFunction(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
- type: Required[Literal["function"]]
- """For function calling, the type is always `function`."""
-
-
-ToolChoice: TypeAlias = Union[
- Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction
-]
-
-ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam]
-
-
-class ToolFileSearchToolRankingOptions(TypedDict, total=False):
- ranker: Literal["auto", "default-2024-11-15"]
- """The ranker to use for the file search."""
-
- score_threshold: float
- """
- The score threshold for the file search, a number between 0 and 1. Numbers
- closer to 1 will attempt to return only the most relevant results, but may
- return fewer results.
- """
-
-
-class ToolFileSearchTool(TypedDict, total=False):
- type: Required[Literal["file_search"]]
- """The type of the file search tool. Always `file_search`."""
-
- vector_store_ids: Required[List[str]]
- """The IDs of the vector stores to search."""
-
- filters: ToolFileSearchToolFilters
- """A filter to apply based on file attributes."""
-
- max_num_results: int
- """The maximum number of results to return.
-
- This number should be between 1 and 50 inclusive.
- """
-
- ranking_options: ToolFileSearchToolRankingOptions
- """Ranking options for search."""
-
-
-class ToolFunctionTool(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
- parameters: Required[Dict[str, object]]
- """A JSON schema object describing the parameters of the function."""
-
- strict: Required[bool]
- """Whether to enforce strict parameter validation. Default `true`."""
-
- type: Required[Literal["function"]]
- """The type of the function tool. Always `function`."""
-
- description: Optional[str]
- """A description of the function.
-
- Used by the model to determine whether or not to call the function.
- """
-
-
-class ToolComputerTool(TypedDict, total=False):
- display_height: Required[float]
- """The height of the computer display."""
-
- display_width: Required[float]
- """The width of the computer display."""
-
- environment: Required[Literal["mac", "windows", "ubuntu", "browser"]]
- """The type of computer environment to control."""
-
- type: Required[Literal["computer_use_preview"]]
- """The type of the computer use tool. Always `computer_use_preview`."""
-
-
-class ToolWebSearchToolUserLocation(WebSearchLocationParam, total=False):
- type: Required[Literal["approximate"]]
- """The type of location approximation. Always `approximate`."""
-
-
-class ToolWebSearchTool(TypedDict, total=False):
- type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
- """The type of the web search tool. One of:
-
- - `web_search_preview`
- - `web_search_preview_2025_03_11`
- """
-
- search_context_size: WebSearchContextSize
- """
- High level guidance for the amount of context window space to use for the
- search. One of `low`, `medium`, or `high`. `medium` is the default.
- """
-
- user_location: Optional[ToolWebSearchToolUserLocation]
- """Approximate location parameters for the search."""
-
-
-Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool]
diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_params.py b/src/digitalocean_genai_sdk/types/response_list_input_items_params.py
deleted file mode 100644
index cba0c8b8..00000000
--- a/src/digitalocean_genai_sdk/types/response_list_input_items_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["ResponseListInputItemsParams"]
-
-
-class ResponseListInputItemsParams(TypedDict, total=False):
- after: str
- """An item ID to list items after, used in pagination."""
-
- before: str
- """An item ID to list items before, used in pagination."""
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """The order to return the input items in. Default is `asc`.
-
- - `asc`: Return the input items in ascending order.
- - `desc`: Return the input items in descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_response.py b/src/digitalocean_genai_sdk/types/response_list_input_items_response.py
deleted file mode 100644
index 95f4555e..00000000
--- a/src/digitalocean_genai_sdk/types/response_list_input_items_response.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-from .input_message import InputMessage
-from .output_message import OutputMessage
-from .computer_tool_call import ComputerToolCall
-from .function_tool_call import FunctionToolCall
-from .web_search_tool_call import WebSearchToolCall
-from .file_search_tool_call import FileSearchToolCall
-from .computer_tool_call_output import ComputerToolCallOutput
-from .function_tool_call_output import FunctionToolCallOutput
-
-__all__ = [
- "ResponseListInputItemsResponse",
- "Data",
- "DataMessage",
- "DataComputerCallOutput",
- "DataFunctionCall",
- "DataFunctionCallOutput",
-]
-
-
-class DataMessage(InputMessage):
- id: str
- """The unique ID of the message input."""
-
-
-class DataComputerCallOutput(ComputerToolCallOutput):
- id: str # type: ignore
- """The unique ID of the computer call tool output."""
-
-
-class DataFunctionCall(FunctionToolCall):
- id: str # type: ignore
- """The unique ID of the function tool call."""
-
-
-class DataFunctionCallOutput(FunctionToolCallOutput):
- id: str # type: ignore
- """The unique ID of the function call tool output."""
-
-
-Data: TypeAlias = Annotated[
- Union[
- DataMessage,
- OutputMessage,
- FileSearchToolCall,
- ComputerToolCall,
- DataComputerCallOutput,
- WebSearchToolCall,
- DataFunctionCall,
- DataFunctionCallOutput,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class ResponseListInputItemsResponse(BaseModel):
- data: List[Data]
- """A list of items used to generate this response."""
-
- first_id: str
- """The ID of the first item in the list."""
-
- has_more: bool
- """Whether there are more items available."""
-
- last_id: str
- """The ID of the last item in the list."""
-
- object: Literal["list"]
- """The type of object returned, must be `list`."""
diff --git a/src/digitalocean_genai_sdk/types/response_properties.py b/src/digitalocean_genai_sdk/types/response_properties.py
deleted file mode 100644
index 84746be5..00000000
--- a/src/digitalocean_genai_sdk/types/response_properties.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-from .compound_filter import CompoundFilter
-from .reasoning_effort import ReasoningEffort
-from .comparison_filter import ComparisonFilter
-from .chat.web_search_location import WebSearchLocation
-from .chat.response_format_text import ResponseFormatText
-from .chat.web_search_context_size import WebSearchContextSize
-from .chat.response_format_json_object import ResponseFormatJsonObject
-
-__all__ = [
- "ResponseProperties",
- "Reasoning",
- "Text",
- "TextFormat",
- "TextFormatTextResponseFormatJsonSchema",
- "ToolChoice",
- "ToolChoiceToolChoiceTypes",
- "ToolChoiceToolChoiceFunction",
- "Tool",
- "ToolFileSearchTool",
- "ToolFileSearchToolFilters",
- "ToolFileSearchToolRankingOptions",
- "ToolFunctionTool",
- "ToolComputerTool",
- "ToolWebSearchTool",
- "ToolWebSearchToolUserLocation",
-]
-
-
-class Reasoning(BaseModel):
- effort: Optional[ReasoningEffort] = None
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- generate_summary: Optional[Literal["concise", "detailed"]] = None
- """**computer_use_preview only**
-
- A summary of the reasoning performed by the model. This can be useful for
- debugging and understanding the model's reasoning process. One of `concise` or
- `detailed`.
- """
-
-
-class TextFormatTextResponseFormatJsonSchema(BaseModel):
- schema_: Dict[str, object] = FieldInfo(alias="schema")
- """
- The schema for the response format, described as a JSON Schema object. Learn how
- to build JSON schemas [here](https://json-schema.org/).
- """
-
- type: Literal["json_schema"]
- """The type of response format being defined. Always `json_schema`."""
-
- description: Optional[str] = None
- """
- A description of what the response format is for, used by the model to determine
- how to respond in the format.
- """
-
- name: Optional[str] = None
- """The name of the response format.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- strict: Optional[bool] = None
- """
- Whether to enable strict schema adherence when generating the output. If set to
- true, the model will always follow the exact schema defined in the `schema`
- field. Only a subset of JSON Schema is supported when `strict` is `true`. To
- learn more, read the
- [Structured Outputs guide](/docs/guides/structured-outputs).
- """
-
-
-TextFormat: TypeAlias = Union[ResponseFormatText, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObject]
-
-
-class Text(BaseModel):
- format: Optional[TextFormat] = None
- """An object specifying the format that the model must output.
-
- Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
- ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](/docs/guides/structured-outputs).
-
- The default format is `{ "type": "text" }` with no additional options.
-
- **Not recommended for gpt-4o and newer models:**
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
-
-class ToolChoiceToolChoiceTypes(BaseModel):
- type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
- """The type of hosted tool the model should to use.
-
- Learn more about [built-in tools](/docs/guides/tools).
-
- Allowed values are:
-
- - `file_search`
- - `web_search_preview`
- - `computer_use_preview`
- """
-
-
-class ToolChoiceToolChoiceFunction(BaseModel):
- name: str
- """The name of the function to call."""
-
- type: Literal["function"]
- """For function calling, the type is always `function`."""
-
-
-ToolChoice: TypeAlias = Union[
- Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction
-]
-
-ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
-
-
-class ToolFileSearchToolRankingOptions(BaseModel):
- ranker: Optional[Literal["auto", "default-2024-11-15"]] = None
- """The ranker to use for the file search."""
-
- score_threshold: Optional[float] = None
- """
- The score threshold for the file search, a number between 0 and 1. Numbers
- closer to 1 will attempt to return only the most relevant results, but may
- return fewer results.
- """
-
-
-class ToolFileSearchTool(BaseModel):
- type: Literal["file_search"]
- """The type of the file search tool. Always `file_search`."""
-
- vector_store_ids: List[str]
- """The IDs of the vector stores to search."""
-
- filters: Optional[ToolFileSearchToolFilters] = None
- """A filter to apply based on file attributes."""
-
- max_num_results: Optional[int] = None
- """The maximum number of results to return.
-
- This number should be between 1 and 50 inclusive.
- """
-
- ranking_options: Optional[ToolFileSearchToolRankingOptions] = None
- """Ranking options for search."""
-
-
-class ToolFunctionTool(BaseModel):
- name: str
- """The name of the function to call."""
-
- parameters: Dict[str, object]
- """A JSON schema object describing the parameters of the function."""
-
- strict: bool
- """Whether to enforce strict parameter validation. Default `true`."""
-
- type: Literal["function"]
- """The type of the function tool. Always `function`."""
-
- description: Optional[str] = None
- """A description of the function.
-
- Used by the model to determine whether or not to call the function.
- """
-
-
-class ToolComputerTool(BaseModel):
- display_height: float
- """The height of the computer display."""
-
- display_width: float
- """The width of the computer display."""
-
- environment: Literal["mac", "windows", "ubuntu", "browser"]
- """The type of computer environment to control."""
-
- type: Literal["computer_use_preview"]
- """The type of the computer use tool. Always `computer_use_preview`."""
-
-
-class ToolWebSearchToolUserLocation(WebSearchLocation):
- type: Literal["approximate"]
- """The type of location approximation. Always `approximate`."""
-
-
-class ToolWebSearchTool(BaseModel):
- type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
- """The type of the web search tool. One of:
-
- - `web_search_preview`
- - `web_search_preview_2025_03_11`
- """
-
- search_context_size: Optional[WebSearchContextSize] = None
- """
- High level guidance for the amount of context window space to use for the
- search. One of `low`, `medium`, or `high`. `medium` is the default.
- """
-
- user_location: Optional[ToolWebSearchToolUserLocation] = None
- """Approximate location parameters for the search."""
-
-
-Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool]
-
-
-class ResponseProperties(BaseModel):
- instructions: Optional[str] = None
- """
- Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will be not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
- """
-
- max_output_tokens: Optional[int] = None
- """
- An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
- """
-
- model: Union[
- Literal[
- "o3-mini",
- "o3-mini-2025-01-31",
- "o1",
- "o1-2024-12-17",
- "o1-preview",
- "o1-preview-2024-09-12",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-audio-preview",
- "gpt-4o-audio-preview-2024-10-01",
- "gpt-4o-audio-preview-2024-12-17",
- "gpt-4o-mini-audio-preview",
- "gpt-4o-mini-audio-preview-2024-12-17",
- "gpt-4o-search-preview",
- "gpt-4o-mini-search-preview",
- "gpt-4o-search-preview-2025-03-11",
- "gpt-4o-mini-search-preview-2025-03-11",
- "chatgpt-4o-latest",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0301",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- "o1-pro",
- "o1-pro-2025-03-19",
- "computer-use-preview",
- "computer-use-preview-2025-03-11",
- ],
- str,
- None,
- ] = None
- """Model ID used to generate the response, like `gpt-4o` or `o1`.
-
- OpenAI offers a wide range of models with different capabilities, performance
- characteristics, and price points. Refer to the [model guide](/docs/models) to
- browse and compare available models.
- """
-
- previous_response_id: Optional[str] = None
- """The unique ID of the previous response to the model.
-
- Use this to create multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
- """
-
- reasoning: Optional[Reasoning] = None
- """**o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- """
-
- text: Optional[Text] = None
- """Configuration options for a text response from the model.
-
- Can be plain text or structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
- """
-
- tool_choice: Optional[ToolChoice] = None
- """
- How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
- """
-
- tools: Optional[List[Tool]] = None
- """An array of tools the model may call while generating a response.
-
- You can specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
- """
-
- truncation: Optional[Literal["auto", "disabled"]] = None
- """The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
- """
diff --git a/src/digitalocean_genai_sdk/types/response_retrieve_params.py b/src/digitalocean_genai_sdk/types/response_retrieve_params.py
deleted file mode 100644
index b85dbba1..00000000
--- a/src/digitalocean_genai_sdk/types/response_retrieve_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import TypedDict
-
-from .includable import Includable
-
-__all__ = ["ResponseRetrieveParams"]
-
-
-class ResponseRetrieveParams(TypedDict, total=False):
- include: List[Includable]
- """Specify additional output data to include in the response.
-
- Currently supported values are:
-
- - `file_search_call.results`: Include the search results of
-
- the file search tool call.
-
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
- """
diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy.py
deleted file mode 100644
index a4c0ce82..00000000
--- a/src/digitalocean_genai_sdk/types/static_chunking_strategy.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["StaticChunkingStrategy"]
-
-
-class StaticChunkingStrategy(BaseModel):
- chunk_overlap_tokens: int
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: int
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py
deleted file mode 100644
index c3535404..00000000
--- a/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["StaticChunkingStrategyParam"]
-
-
-class StaticChunkingStrategyParam(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py
deleted file mode 100644
index 51de3b75..00000000
--- a/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .static_chunking_strategy_param import StaticChunkingStrategyParam
-
-__all__ = ["StaticChunkingStrategyRequestParam"]
-
-
-class StaticChunkingStrategyRequestParam(TypedDict, total=False):
- static: Required[StaticChunkingStrategyParam]
-
- type: Required[Literal["static"]]
- """Always `static`."""
diff --git a/src/digitalocean_genai_sdk/types/stop_configuration_param.py b/src/digitalocean_genai_sdk/types/stop_configuration_param.py
deleted file mode 100644
index d3093c7c..00000000
--- a/src/digitalocean_genai_sdk/types/stop_configuration_param.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Optional
-from typing_extensions import TypeAlias
-
-__all__ = ["StopConfigurationParam"]
-
-StopConfigurationParam: TypeAlias = Union[Optional[str], List[str]]
diff --git a/src/digitalocean_genai_sdk/types/thread_create_params.py b/src/digitalocean_genai_sdk/types/thread_create_params.py
deleted file mode 100644
index 7ee77039..00000000
--- a/src/digitalocean_genai_sdk/types/thread_create_params.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .threads.create_message_request_param import CreateMessageRequestParam
-
-__all__ = [
- "ThreadCreateParams",
- "ToolResources",
- "ToolResourcesCodeInterpreter",
- "ToolResourcesFileSearch",
- "ToolResourcesFileSearchVectorStore",
- "ToolResourcesFileSearchVectorStoreChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy",
- "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic",
-]
-
-
-class ThreadCreateParams(TypedDict, total=False):
- messages: Iterable[CreateMessageRequestParam]
- """A list of [messages](/docs/api-reference/messages) to start the thread with."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- tool_resources: Optional[ToolResources]
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False):
- type: Required[Literal["auto"]]
- """Always `auto`."""
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False):
- chunk_overlap_tokens: Required[int]
- """The number of tokens that overlap between chunks. The default value is `400`.
-
- Note that the overlap must not exceed half of `max_chunk_size_tokens`.
- """
-
- max_chunk_size_tokens: Required[int]
- """The maximum number of tokens in each chunk.
-
- The default value is `800`. The minimum value is `100` and the maximum value is
- `4096`.
- """
-
-
-class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False):
- static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic]
-
- type: Required[Literal["static"]]
- """Always `static`."""
-
-
-ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
- ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy,
- ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy,
-]
-
-
-class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
- chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
-
- file_ids: List[str]
- """A list of [file](/docs/api-reference/files) IDs to add to the vector store.
-
- There can be a maximum of 10000 files in a vector store.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
- vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
- """
- A helper to create a [vector store](/docs/api-reference/vector-stores/object)
- with file_ids and attach it to this thread. There can be a maximum of 1 vector
- store attached to the thread.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
diff --git a/src/digitalocean_genai_sdk/types/thread_delete_response.py b/src/digitalocean_genai_sdk/types/thread_delete_response.py
deleted file mode 100644
index 74f09d84..00000000
--- a/src/digitalocean_genai_sdk/types/thread_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ThreadDeleteResponse"]
-
-
-class ThreadDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["thread.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/thread_object.py b/src/digitalocean_genai_sdk/types/thread_object.py
deleted file mode 100644
index 7924dd8f..00000000
--- a/src/digitalocean_genai_sdk/types/thread_object.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ThreadObject", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
-
-
-class ToolResourcesCodeInterpreter(BaseModel):
- file_ids: Optional[List[str]] = None
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(BaseModel):
- vector_store_ids: Optional[List[str]] = None
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
-
-class ToolResources(BaseModel):
- code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
-
- file_search: Optional[ToolResourcesFileSearch] = None
-
-
-class ThreadObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the thread was created."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- object: Literal["thread"]
- """The object type, which is always `thread`."""
-
- tool_resources: Optional[ToolResources] = None
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
diff --git a/src/digitalocean_genai_sdk/types/thread_update_params.py b/src/digitalocean_genai_sdk/types/thread_update_params.py
deleted file mode 100644
index d952d35b..00000000
--- a/src/digitalocean_genai_sdk/types/thread_update_params.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Optional
-from typing_extensions import TypedDict
-
-__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
-
-
-class ThreadUpdateParams(TypedDict, total=False):
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- tool_resources: Optional[ToolResources]
- """
- A set of resources that are made available to the assistant's tools in this
- thread. The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The [vector store](/docs/api-reference/vector-stores/object) attached to this
- thread. There can be a maximum of 1 vector store attached to the thread.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
diff --git a/src/digitalocean_genai_sdk/types/threads/__init__.py b/src/digitalocean_genai_sdk/types/threads/__init__.py
deleted file mode 100644
index 9af8d93a..00000000
--- a/src/digitalocean_genai_sdk/types/threads/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .run_object import RunObject as RunObject
-from .message_object import MessageObject as MessageObject
-from .run_list_params import RunListParams as RunListParams
-from .run_create_params import RunCreateParams as RunCreateParams
-from .run_list_response import RunListResponse as RunListResponse
-from .run_update_params import RunUpdateParams as RunUpdateParams
-from .truncation_object import TruncationObject as TruncationObject
-from .message_list_params import MessageListParams as MessageListParams
-from .message_create_params import MessageCreateParams as MessageCreateParams
-from .message_list_response import MessageListResponse as MessageListResponse
-from .message_update_params import MessageUpdateParams as MessageUpdateParams
-from .run_create_run_params import RunCreateRunParams as RunCreateRunParams
-from .message_delete_response import MessageDeleteResponse as MessageDeleteResponse
-from .truncation_object_param import TruncationObjectParam as TruncationObjectParam
-from .create_message_request_param import CreateMessageRequestParam as CreateMessageRequestParam
-from .run_submit_tool_outputs_params import RunSubmitToolOutputsParams as RunSubmitToolOutputsParams
-from .message_content_image_url_object import MessageContentImageURLObject as MessageContentImageURLObject
-from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption as AssistantsAPIToolChoiceOption
-from .message_content_image_file_object import MessageContentImageFileObject as MessageContentImageFileObject
-from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly as AssistantToolsFileSearchTypeOnly
-from .message_content_image_url_object_param import (
- MessageContentImageURLObjectParam as MessageContentImageURLObjectParam,
-)
-from .assistants_api_tool_choice_option_param import (
- AssistantsAPIToolChoiceOptionParam as AssistantsAPIToolChoiceOptionParam,
-)
-from .message_content_image_file_object_param import (
- MessageContentImageFileObjectParam as MessageContentImageFileObjectParam,
-)
-from .assistant_tools_file_search_type_only_param import (
- AssistantToolsFileSearchTypeOnlyParam as AssistantToolsFileSearchTypeOnlyParam,
-)
diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py
deleted file mode 100644
index 6708bff3..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["AssistantToolsFileSearchTypeOnly"]
-
-
-class AssistantToolsFileSearchTypeOnly(BaseModel):
- type: Literal["file_search"]
- """The type of tool being defined: `file_search`"""
diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py
deleted file mode 100644
index f0a48b2c..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["AssistantToolsFileSearchTypeOnlyParam"]
-
-
-class AssistantToolsFileSearchTypeOnlyParam(TypedDict, total=False):
- type: Required[Literal["file_search"]]
- """The type of tool being defined: `file_search`"""
diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py
deleted file mode 100644
index af7be1f7..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-
-__all__ = ["AssistantsAPIToolChoiceOption", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"]
-
-
-class AssistantsNamedToolChoiceFunction(BaseModel):
- name: str
- """The name of the function to call."""
-
-
-class AssistantsNamedToolChoice(BaseModel):
- type: Literal["function", "code_interpreter", "file_search"]
- """The type of the tool. If type is `function`, the function name must be set"""
-
- function: Optional[AssistantsNamedToolChoiceFunction] = None
-
-
-AssistantsAPIToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice]
diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py
deleted file mode 100644
index 10f98f89..00000000
--- a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = ["AssistantsAPIToolChoiceOptionParam", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"]
-
-
-class AssistantsNamedToolChoiceFunction(TypedDict, total=False):
- name: Required[str]
- """The name of the function to call."""
-
-
-class AssistantsNamedToolChoice(TypedDict, total=False):
- type: Required[Literal["function", "code_interpreter", "file_search"]]
- """The type of the tool. If type is `function`, the function name must be set"""
-
- function: AssistantsNamedToolChoiceFunction
-
-
-AssistantsAPIToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice]
diff --git a/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py b/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py
deleted file mode 100644
index 64c2a781..00000000
--- a/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from .message_content_image_url_object_param import MessageContentImageURLObjectParam
-from .message_content_image_file_object_param import MessageContentImageFileObjectParam
-from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam
-
-__all__ = [
- "CreateMessageRequestParam",
- "ContentArrayOfContentPart",
- "ContentArrayOfContentPartMessageRequestContentTextObject",
- "Attachment",
- "AttachmentTool",
-]
-
-
-class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False):
- text: Required[str]
- """Text content to be sent to the model"""
-
- type: Required[Literal["text"]]
- """Always `text`."""
-
-
-ContentArrayOfContentPart: TypeAlias = Union[
- MessageContentImageFileObjectParam,
- MessageContentImageURLObjectParam,
- ContentArrayOfContentPartMessageRequestContentTextObject,
-]
-
-AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam]
-
-
-class Attachment(TypedDict, total=False):
- file_id: str
- """The ID of the file to attach to the message."""
-
- tools: Iterable[AttachmentTool]
- """The tools to add this file to."""
-
-
-class CreateMessageRequestParam(TypedDict, total=False):
- content: Required[Union[str, Iterable[ContentArrayOfContentPart]]]
- """The text contents of the message."""
-
- role: Required[Literal["user", "assistant"]]
- """The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
- """
-
- attachments: Optional[Iterable[Attachment]]
- """A list of files attached to the message, and the tools they should be added to."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py
deleted file mode 100644
index b22ef410..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageContentImageFileObject", "ImageFile"]
-
-
-class ImageFile(BaseModel):
- file_id: str
- """The [File](/docs/api-reference/files) ID of the image in the message content.
-
- Set `purpose="vision"` when uploading the File if you need to later display the
- file content.
- """
-
- detail: Optional[Literal["auto", "low", "high"]] = None
- """Specifies the detail level of the image if specified by the user.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`.
- """
-
-
-class MessageContentImageFileObject(BaseModel):
- image_file: ImageFile
-
- type: Literal["image_file"]
- """Always `image_file`."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py
deleted file mode 100644
index 734dcf15..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["MessageContentImageFileObjectParam", "ImageFile"]
-
-
-class ImageFile(TypedDict, total=False):
- file_id: Required[str]
- """The [File](/docs/api-reference/files) ID of the image in the message content.
-
- Set `purpose="vision"` when uploading the File if you need to later display the
- file content.
- """
-
- detail: Literal["auto", "low", "high"]
- """Specifies the detail level of the image if specified by the user.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`.
- """
-
-
-class MessageContentImageFileObjectParam(TypedDict, total=False):
- image_file: Required[ImageFile]
-
- type: Required[Literal["image_file"]]
- """Always `image_file`."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py
deleted file mode 100644
index 9a7f980b..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageContentImageURLObject", "ImageURL"]
-
-
-class ImageURL(BaseModel):
- url: str
- """
- The external URL of the image, must be a supported image types: jpeg, jpg, png,
- gif, webp.
- """
-
- detail: Optional[Literal["auto", "low", "high"]] = None
- """Specifies the detail level of the image.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`. Default
- value is `auto`
- """
-
-
-class MessageContentImageURLObject(BaseModel):
- image_url: ImageURL
-
- type: Literal["image_url"]
- """The type of the content part."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py
deleted file mode 100644
index f3f777c4..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["MessageContentImageURLObjectParam", "ImageURL"]
-
-
-class ImageURL(TypedDict, total=False):
- url: Required[str]
- """
- The external URL of the image, must be a supported image types: jpeg, jpg, png,
- gif, webp.
- """
-
- detail: Literal["auto", "low", "high"]
- """Specifies the detail level of the image.
-
- `low` uses fewer tokens, you can opt in to high resolution using `high`. Default
- value is `auto`
- """
-
-
-class MessageContentImageURLObjectParam(TypedDict, total=False):
- image_url: Required[ImageURL]
-
- type: Required[Literal["image_url"]]
- """The type of the content part."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_create_params.py b/src/digitalocean_genai_sdk/types/threads/message_create_params.py
deleted file mode 100644
index d9a4cd40..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_create_params.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from .message_content_image_url_object_param import MessageContentImageURLObjectParam
-from .message_content_image_file_object_param import MessageContentImageFileObjectParam
-from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam
-
-__all__ = [
- "MessageCreateParams",
- "ContentArrayOfContentPart",
- "ContentArrayOfContentPartMessageRequestContentTextObject",
- "Attachment",
- "AttachmentTool",
-]
-
-
-class MessageCreateParams(TypedDict, total=False):
- content: Required[Union[str, Iterable[ContentArrayOfContentPart]]]
- """The text contents of the message."""
-
- role: Required[Literal["user", "assistant"]]
- """The role of the entity that is creating the message. Allowed values include:
-
- - `user`: Indicates the message is sent by an actual user and should be used in
- most cases to represent user-generated messages.
- - `assistant`: Indicates the message is generated by the assistant. Use this
- value to insert messages from the assistant into the conversation.
- """
-
- attachments: Optional[Iterable[Attachment]]
- """A list of files attached to the message, and the tools they should be added to."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
-
-class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False):
- text: Required[str]
- """Text content to be sent to the model"""
-
- type: Required[Literal["text"]]
- """Always `text`."""
-
-
-ContentArrayOfContentPart: TypeAlias = Union[
- MessageContentImageFileObjectParam,
- MessageContentImageURLObjectParam,
- ContentArrayOfContentPartMessageRequestContentTextObject,
-]
-
-AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam]
-
-
-class Attachment(TypedDict, total=False):
- file_id: str
- """The ID of the file to attach to the message."""
-
- tools: Iterable[AttachmentTool]
- """The tools to add this file to."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_delete_response.py b/src/digitalocean_genai_sdk/types/threads/message_delete_response.py
deleted file mode 100644
index c86408dc..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["MessageDeleteResponse"]
-
-
-class MessageDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["thread.message.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_params.py b/src/digitalocean_genai_sdk/types/threads/message_list_params.py
deleted file mode 100644
index a7c22a66..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_list_params.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["MessageListParams"]
-
-
-class MessageListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
-
- run_id: str
- """Filter messages by the run ID that generated them."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_response.py b/src/digitalocean_genai_sdk/types/threads/message_list_response.py
deleted file mode 100644
index f710da32..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .message_object import MessageObject
-
-__all__ = ["MessageListResponse"]
-
-
-class MessageListResponse(BaseModel):
- data: List[MessageObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/message_object.py b/src/digitalocean_genai_sdk/types/threads/message_object.py
deleted file mode 100644
index b2cb3711..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_object.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from ..assistant_tools_code import AssistantToolsCode
-from .message_content_image_url_object import MessageContentImageURLObject
-from .message_content_image_file_object import MessageContentImageFileObject
-from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly
-
-__all__ = [
- "MessageObject",
- "Attachment",
- "AttachmentTool",
- "Content",
- "ContentMessageContentTextObject",
- "ContentMessageContentTextObjectText",
- "ContentMessageContentTextObjectTextAnnotation",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject",
- "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath",
- "ContentMessageContentRefusalObject",
- "IncompleteDetails",
-]
-
-AttachmentTool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearchTypeOnly]
-
-
-class Attachment(BaseModel):
- file_id: Optional[str] = None
- """The ID of the file to attach to the message."""
-
- tools: Optional[List[AttachmentTool]] = None
- """The tools to add this file to."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation(
- BaseModel
-):
- file_id: str
- """The ID of the specific File the citation is from."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject(BaseModel):
- end_index: int
-
- file_citation: (
- ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation
- )
-
- start_index: int
-
- text: str
- """The text in the message content that needs to be replaced."""
-
- type: Literal["file_citation"]
- """Always `file_citation`."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath(BaseModel):
- file_id: str
- """The ID of the file that was generated."""
-
-
-class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject(BaseModel):
- end_index: int
-
- file_path: ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath
-
- start_index: int
-
- text: str
- """The text in the message content that needs to be replaced."""
-
- type: Literal["file_path"]
- """Always `file_path`."""
-
-
-ContentMessageContentTextObjectTextAnnotation: TypeAlias = Union[
- ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject,
- ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject,
-]
-
-
-class ContentMessageContentTextObjectText(BaseModel):
- annotations: List[ContentMessageContentTextObjectTextAnnotation]
-
- value: str
- """The data that makes up the text."""
-
-
-class ContentMessageContentTextObject(BaseModel):
- text: ContentMessageContentTextObjectText
-
- type: Literal["text"]
- """Always `text`."""
-
-
-class ContentMessageContentRefusalObject(BaseModel):
- refusal: str
-
- type: Literal["refusal"]
- """Always `refusal`."""
-
-
-Content: TypeAlias = Union[
- MessageContentImageFileObject,
- MessageContentImageURLObject,
- ContentMessageContentTextObject,
- ContentMessageContentRefusalObject,
-]
-
-
-class IncompleteDetails(BaseModel):
- reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"]
- """The reason the message is incomplete."""
-
-
-class MessageObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- assistant_id: Optional[str] = None
- """
- If applicable, the ID of the [assistant](/docs/api-reference/assistants) that
- authored this message.
- """
-
- attachments: Optional[List[Attachment]] = None
- """A list of files attached to the message, and the tools they were added to."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the message was completed."""
-
- content: List[Content]
- """The content of the message in array of text and/or images."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the message was created."""
-
- incomplete_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the message was marked as incomplete."""
-
- incomplete_details: Optional[IncompleteDetails] = None
- """On an incomplete message, details about why the message is incomplete."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- object: Literal["thread.message"]
- """The object type, which is always `thread.message`."""
-
- role: Literal["user", "assistant"]
- """The entity that produced the message. One of `user` or `assistant`."""
-
- run_id: Optional[str] = None
- """
- The ID of the [run](/docs/api-reference/runs) associated with the creation of
- this message. Value is `null` when messages are created manually using the
- create message or create thread endpoints.
- """
-
- status: Literal["in_progress", "incomplete", "completed"]
- """
- The status of the message, which can be either `in_progress`, `incomplete`, or
- `completed`.
- """
-
- thread_id: str
- """The [thread](/docs/api-reference/threads) ID that this message belongs to."""
diff --git a/src/digitalocean_genai_sdk/types/threads/message_update_params.py b/src/digitalocean_genai_sdk/types/threads/message_update_params.py
deleted file mode 100644
index a2e25260..00000000
--- a/src/digitalocean_genai_sdk/types/threads/message_update_params.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["MessageUpdateParams"]
-
-
-class MessageUpdateParams(TypedDict, total=False):
- thread_id: Required[str]
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_params.py
deleted file mode 100644
index 43d0611a..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_create_params.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .truncation_object_param import TruncationObjectParam
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from ..create_thread_request_param import CreateThreadRequestParam
-from ..assistant_tools_function_param import AssistantToolsFunctionParam
-from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam
-from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["RunCreateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"]
-
-
-class RunCreateParams(TypedDict, total=False):
- assistant_id: Required[str]
- """
- The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
- """
-
- instructions: Optional[str]
- """Override the default system message of the assistant.
-
- This is useful for modifying the behavior on a per-run basis.
- """
-
- max_completion_tokens: Optional[int]
- """
- The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- max_prompt_tokens: Optional[int]
- """The maximum number of prompt tokens that may be used over the course of the run.
-
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: Union[
- str,
- Literal[
- "gpt-4o",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini",
- "gpt-4o-mini-2024-07-18",
- "gpt-4.5-preview",
- "gpt-4.5-preview-2025-02-27",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-turbo-preview",
- "gpt-4-1106-preview",
- "gpt-4-vision-preview",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- "gpt-4-32k-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-16k-0613",
- ],
- None,
- ]
- """The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run.
-
- If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
- """
-
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- stream: Optional[bool]
- """
- If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- thread: CreateThreadRequestParam
- """Options to create a new thread.
-
- If no thread is provided when running a request, an empty thread will be
- created.
- """
-
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam]
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
- """
-
- tool_resources: Optional[ToolResources]
- """A set of resources that are used by the assistant's tools.
-
- The resources are specific to the type of tool. For example, the
- `code_interpreter` tool requires a list of file IDs, while the `file_search`
- tool requires a list of vector store IDs.
- """
-
- tools: Optional[Iterable[Tool]]
- """Override the tools the assistant can use for this run.
-
- This is useful for modifying the behavior on a per-run basis.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
- truncation_strategy: Optional[TruncationObjectParam]
- """Controls for how a thread will be truncated prior to the run.
-
- Use this to control the intial context window of the run.
- """
-
-
-class ToolResourcesCodeInterpreter(TypedDict, total=False):
- file_ids: List[str]
- """
- A list of [file](/docs/api-reference/files) IDs made available to the
- `code_interpreter` tool. There can be a maximum of 20 files associated with the
- tool.
- """
-
-
-class ToolResourcesFileSearch(TypedDict, total=False):
- vector_store_ids: List[str]
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) attached
- to this assistant. There can be a maximum of 1 vector store attached to the
- assistant.
- """
-
-
-class ToolResources(TypedDict, total=False):
- code_interpreter: ToolResourcesCodeInterpreter
-
- file_search: ToolResourcesFileSearch
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py
deleted file mode 100644
index 694c7eea..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..reasoning_effort import ReasoningEffort
-from .truncation_object_param import TruncationObjectParam
-from ..assistant_supported_models import AssistantSupportedModels
-from ..assistant_tools_code_param import AssistantToolsCodeParam
-from .create_message_request_param import CreateMessageRequestParam
-from ..assistant_tools_function_param import AssistantToolsFunctionParam
-from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam
-from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam
-from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
-
-__all__ = ["RunCreateRunParams", "Tool"]
-
-
-class RunCreateRunParams(TypedDict, total=False):
- assistant_id: Required[str]
- """
- The ID of the [assistant](/docs/api-reference/assistants) to use to execute this
- run.
- """
-
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]]
- """A list of additional fields to include in the response.
-
- Currently the only supported value is
- `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- additional_instructions: Optional[str]
- """Appends additional instructions at the end of the instructions for the run.
-
- This is useful for modifying the behavior on a per-run basis without overriding
- other instructions.
- """
-
- additional_messages: Optional[Iterable[CreateMessageRequestParam]]
- """Adds additional messages to the thread before creating the run."""
-
- instructions: Optional[str]
- """
- Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of
- the assistant. This is useful for modifying the behavior on a per-run basis.
- """
-
- max_completion_tokens: Optional[int]
- """
- The maximum number of completion tokens that may be used over the course of the
- run. The run will make a best effort to use only the number of completion tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- completion tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- max_prompt_tokens: Optional[int]
- """The maximum number of prompt tokens that may be used over the course of the run.
-
- The run will make a best effort to use only the number of prompt tokens
- specified, across multiple turns of the run. If the run exceeds the number of
- prompt tokens specified, the run will end with status `incomplete`. See
- `incomplete_details` for more info.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: Union[str, AssistantSupportedModels, None]
- """The ID of the [Model](/docs/api-reference/models) to be used to execute this
- run.
-
- If a value is provided here, it will override the model associated with the
- assistant. If not, the model associated with the assistant will be used.
- """
-
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- reasoning_effort: Optional[ReasoningEffort]
- """**o-series models only**
-
- Constrains effort on reasoning for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- result in faster responses and fewer tokens used on reasoning in a response.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOptionParam]
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- stream: Optional[bool]
- """
- If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
- """
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
- """
-
- tool_choice: Optional[AssistantsAPIToolChoiceOptionParam]
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
- """
-
- tools: Optional[Iterable[Tool]]
- """Override the tools the assistant can use for this run.
-
- This is useful for modifying the behavior on a per-run basis.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
- """
-
- truncation_strategy: Optional[TruncationObjectParam]
- """Controls for how a thread will be truncated prior to the run.
-
- Use this to control the intial context window of the run.
- """
-
-
-Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_params.py b/src/digitalocean_genai_sdk/types/threads/run_list_params.py
deleted file mode 100644
index fbea54f6..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_list_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["RunListParams"]
-
-
-class RunListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_response.py b/src/digitalocean_genai_sdk/types/threads/run_list_response.py
deleted file mode 100644
index 899bd0f9..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .run_object import RunObject
-
-__all__ = ["RunListResponse"]
-
-
-class RunListResponse(BaseModel):
- data: List[RunObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/run_object.py b/src/digitalocean_genai_sdk/types/threads/run_object.py
deleted file mode 100644
index fa89f4b4..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_object.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from .truncation_object import TruncationObject
-from ..assistant_tools_code import AssistantToolsCode
-from ..assistant_tools_function import AssistantToolsFunction
-from ..assistant_tools_file_search import AssistantToolsFileSearch
-from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption
-from ..assistants_api_response_format_option import AssistantsAPIResponseFormatOption
-
-__all__ = [
- "RunObject",
- "IncompleteDetails",
- "LastError",
- "RequiredAction",
- "RequiredActionSubmitToolOutputs",
- "RequiredActionSubmitToolOutputsToolCall",
- "RequiredActionSubmitToolOutputsToolCallFunction",
- "Tool",
- "Usage",
-]
-
-
-class IncompleteDetails(BaseModel):
- reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None
- """The reason why the run is incomplete.
-
- This will point to which specific token limit was reached over the course of the
- run.
- """
-
-
-class LastError(BaseModel):
- code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
- """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class RequiredActionSubmitToolOutputsToolCallFunction(BaseModel):
- arguments: str
- """The arguments that the model expects you to pass to the function."""
-
- name: str
- """The name of the function."""
-
-
-class RequiredActionSubmitToolOutputsToolCall(BaseModel):
- id: str
- """The ID of the tool call.
-
- This ID must be referenced when you submit the tool outputs in using the
- [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs)
- endpoint.
- """
-
- function: RequiredActionSubmitToolOutputsToolCallFunction
- """The function definition."""
-
- type: Literal["function"]
- """The type of tool call the output is required for.
-
- For now, this is always `function`.
- """
-
-
-class RequiredActionSubmitToolOutputs(BaseModel):
- tool_calls: List[RequiredActionSubmitToolOutputsToolCall]
- """A list of the relevant tool calls."""
-
-
-class RequiredAction(BaseModel):
- submit_tool_outputs: RequiredActionSubmitToolOutputs
- """Details on the tool outputs needed for this run to continue."""
-
- type: Literal["submit_tool_outputs"]
- """For now, this is always `submit_tool_outputs`."""
-
-
-Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction]
-
-
-class Usage(BaseModel):
- completion_tokens: int
- """Number of completion tokens used over the course of the run."""
-
- prompt_tokens: int
- """Number of prompt tokens used over the course of the run."""
-
- total_tokens: int
- """Total number of tokens used (prompt + completion)."""
-
-
-class RunObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- assistant_id: str
- """
- The ID of the [assistant](/docs/api-reference/assistants) used for execution of
- this run.
- """
-
- cancelled_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run was cancelled."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run was completed."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the run was created."""
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run will expire."""
-
- failed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run failed."""
-
- incomplete_details: Optional[IncompleteDetails] = None
- """Details on why the run is incomplete.
-
- Will be `null` if the run is not incomplete.
- """
-
- instructions: str
- """
- The instructions that the [assistant](/docs/api-reference/assistants) used for
- this run.
- """
-
- last_error: Optional[LastError] = None
- """The last error associated with this run. Will be `null` if there are no errors."""
-
- max_completion_tokens: Optional[int] = None
- """
- The maximum number of completion tokens specified to have been used over the
- course of the run.
- """
-
- max_prompt_tokens: Optional[int] = None
- """
- The maximum number of prompt tokens specified to have been used over the course
- of the run.
- """
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- model: str
- """
- The model that the [assistant](/docs/api-reference/assistants) used for this
- run.
- """
-
- object: Literal["thread.run"]
- """The object type, which is always `thread.run`."""
-
- parallel_tool_calls: bool
- """
- Whether to enable
- [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
- during tool use.
- """
-
- required_action: Optional[RequiredAction] = None
- """Details on the action required to continue the run.
-
- Will be `null` if no action is required.
- """
-
- response_format: Optional[AssistantsAPIResponseFormatOption] = None
- """Specifies the format that the model must output.
-
- Compatible with [GPT-4o](/docs/models#gpt-4o),
- [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
- since `gpt-3.5-turbo-1106`.
-
- Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
- Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
- message the model generates is valid JSON.
-
- **Important:** when using JSON mode, you **must** also instruct the model to
- produce JSON yourself via a system or user message. Without this, the model may
- generate an unending stream of whitespace until the generation reaches the token
- limit, resulting in a long-running and seemingly "stuck" request. Also note that
- the message content may be partially cut off if `finish_reason="length"`, which
- indicates the generation exceeded `max_tokens` or the conversation exceeded the
- max context length.
- """
-
- started_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run was started."""
-
- status: Literal[
- "queued",
- "in_progress",
- "requires_action",
- "cancelling",
- "cancelled",
- "failed",
- "completed",
- "incomplete",
- "expired",
- ]
- """
- The status of the run, which can be either `queued`, `in_progress`,
- `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
- `incomplete`, or `expired`.
- """
-
- thread_id: str
- """
- The ID of the [thread](/docs/api-reference/threads) that was executed on as a
- part of this run.
- """
-
- tool_choice: Optional[AssistantsAPIToolChoiceOption] = None
- """
- Controls which (if any) tool is called by the model. `none` means the model will
- not call any tools and instead generates a message. `auto` is the default value
- and means the model can pick between generating a message or calling one or more
- tools. `required` means the model must call one or more tools before responding
- to the user. Specifying a particular tool like `{"type": "file_search"}` or
- `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- call that tool.
- """
-
- tools: List[Tool]
- """
- The list of tools that the [assistant](/docs/api-reference/assistants) used for
- this run.
- """
-
- truncation_strategy: Optional[TruncationObject] = None
- """Controls for how a thread will be truncated prior to the run.
-
- Use this to control the intial context window of the run.
- """
-
- usage: Optional[Usage] = None
- """Usage statistics related to the run.
-
- This value will be `null` if the run is not in a terminal state (i.e.
- `in_progress`, `queued`, etc.).
- """
-
- temperature: Optional[float] = None
- """The sampling temperature used for this run. If not set, defaults to 1."""
-
- top_p: Optional[float] = None
- """The nucleus sampling value used for this run. If not set, defaults to 1."""
diff --git a/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py b/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
deleted file mode 100644
index 77ab84ba..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["RunSubmitToolOutputsParams", "ToolOutput"]
-
-
-class RunSubmitToolOutputsParams(TypedDict, total=False):
- thread_id: Required[str]
-
- tool_outputs: Required[Iterable[ToolOutput]]
- """A list of tools for which the outputs are being submitted."""
-
- stream: Optional[bool]
- """
- If `true`, returns a stream of events that happen during the Run as server-sent
- events, terminating when the Run enters a terminal state with a `data: [DONE]`
- message.
- """
-
-
-class ToolOutput(TypedDict, total=False):
- output: str
- """The output of the tool call to be submitted to continue the run."""
-
- tool_call_id: str
- """
- The ID of the tool call in the `required_action` object within the run object
- the output is being submitted for.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/run_update_params.py b/src/digitalocean_genai_sdk/types/threads/run_update_params.py
deleted file mode 100644
index 7b84a9b5..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_update_params.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["RunUpdateParams"]
-
-
-class RunUpdateParams(TypedDict, total=False):
- thread_id: Required[str]
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py
deleted file mode 100644
index 3cab1f9c..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .run_step_object import RunStepObject as RunStepObject
-from .step_list_params import StepListParams as StepListParams
-from .step_list_response import StepListResponse as StepListResponse
-from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py b/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py
deleted file mode 100644
index 3ede68fa..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ...._models import BaseModel
-from ...file_search_ranker import FileSearchRanker
-
-__all__ = [
- "RunStepObject",
- "LastError",
- "StepDetails",
- "StepDetailsRunStepDetailsMessageCreationObject",
- "StepDetailsRunStepDetailsMessageCreationObjectMessageCreation",
- "StepDetailsRunStepDetailsToolCallsObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCall",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject",
- "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction",
- "Usage",
-]
-
-
-class LastError(BaseModel):
- code: Literal["server_error", "rate_limit_exceeded"]
- """One of `server_error` or `rate_limit_exceeded`."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class StepDetailsRunStepDetailsMessageCreationObjectMessageCreation(BaseModel):
- message_id: str
- """The ID of the message that was created by this run step."""
-
-
-class StepDetailsRunStepDetailsMessageCreationObject(BaseModel):
- message_creation: StepDetailsRunStepDetailsMessageCreationObjectMessageCreation
-
- type: Literal["message_creation"]
- """Always `message_creation`."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject(
- BaseModel
-):
- logs: str
- """The text output from the Code Interpreter tool call."""
-
- type: Literal["logs"]
- """Always `logs`."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage(
- BaseModel
-):
- file_id: str
- """The [file](/docs/api-reference/files) ID of the image."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject(
- BaseModel
-):
- image: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage
-
- type: Literal["image"]
- """Always `image`."""
-
-
-StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput: TypeAlias = Union[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject,
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject,
-]
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter(BaseModel):
- input: str
- """The input to the Code Interpreter tool call."""
-
- outputs: List[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput
- ]
- """The outputs from the Code Interpreter tool call.
-
- Code Interpreter can output one or more items, including text (`logs`) or images
- (`image`). Each of these are represented by a different object type.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject(BaseModel):
- id: str
- """The ID of the tool call."""
-
- code_interpreter: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter
- """The Code Interpreter tool call definition."""
-
- type: Literal["code_interpreter"]
- """The type of tool call.
-
- This is always going to be `code_interpreter` for this type of tool call.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions(
- BaseModel
-):
- ranker: FileSearchRanker
- """The ranker to use for the file search.
-
- If not specified will use the `auto` ranker.
- """
-
- score_threshold: float
- """The score threshold for the file search.
-
- All values must be a floating point number between 0 and 1.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent(
- BaseModel
-):
- text: Optional[str] = None
- """The text content of the file."""
-
- type: Optional[Literal["text"]] = None
- """The type of the content."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult(
- BaseModel
-):
- file_id: str
- """The ID of the file that result was found in."""
-
- file_name: str
- """The name of the file that result was found in."""
-
- score: float
- """The score of the result.
-
- All values must be a floating point number between 0 and 1.
- """
-
- content: Optional[
- List[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent
- ]
- ] = None
- """The content of the result that was found.
-
- The content is only included if requested via the include query parameter.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch(BaseModel):
- ranking_options: Optional[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions
- ] = None
- """The ranking options for the file search."""
-
- results: Optional[
- List[StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult]
- ] = None
- """The results of the file search."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject(BaseModel):
- id: str
- """The ID of the tool call object."""
-
- file_search: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch
- """For now, this is always going to be an empty object."""
-
- type: Literal["file_search"]
- """The type of tool call.
-
- This is always going to be `file_search` for this type of tool call.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction(BaseModel):
- arguments: str
- """The arguments passed to the function."""
-
- name: str
- """The name of the function."""
-
- output: Optional[str] = None
- """The output of the function.
-
- This will be `null` if the outputs have not been
- [submitted](/docs/api-reference/runs/submitToolOutputs) yet.
- """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject(BaseModel):
- id: str
- """The ID of the tool call object."""
-
- function: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction
- """The definition of the function that was called."""
-
- type: Literal["function"]
- """The type of tool call.
-
- This is always going to be `function` for this type of tool call.
- """
-
-
-StepDetailsRunStepDetailsToolCallsObjectToolCall: TypeAlias = Union[
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject,
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject,
- StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject,
-]
-
-
-class StepDetailsRunStepDetailsToolCallsObject(BaseModel):
- tool_calls: List[StepDetailsRunStepDetailsToolCallsObjectToolCall]
- """An array of tool calls the run step was involved in.
-
- These can be associated with one of three types of tools: `code_interpreter`,
- `file_search`, or `function`.
- """
-
- type: Literal["tool_calls"]
- """Always `tool_calls`."""
-
-
-StepDetails: TypeAlias = Union[StepDetailsRunStepDetailsMessageCreationObject, StepDetailsRunStepDetailsToolCallsObject]
-
-
-class Usage(BaseModel):
- completion_tokens: int
- """Number of completion tokens used over the course of the run step."""
-
- prompt_tokens: int
- """Number of prompt tokens used over the course of the run step."""
-
- total_tokens: int
- """Total number of tokens used (prompt + completion)."""
-
-
-class RunStepObject(BaseModel):
- id: str
- """The identifier of the run step, which can be referenced in API endpoints."""
-
- assistant_id: str
- """
- The ID of the [assistant](/docs/api-reference/assistants) associated with the
- run step.
- """
-
- cancelled_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step was cancelled."""
-
- completed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step completed."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the run step was created."""
-
- expired_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step expired.
-
- A step is considered expired if the parent run is expired.
- """
-
- failed_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the run step failed."""
-
- last_error: Optional[LastError] = None
- """The last error associated with this run step.
-
- Will be `null` if there are no errors.
- """
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- object: Literal["thread.run.step"]
- """The object type, which is always `thread.run.step`."""
-
- run_id: str
- """The ID of the [run](/docs/api-reference/runs) that this run step is a part of."""
-
- status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
- """
- The status of the run step, which can be either `in_progress`, `cancelled`,
- `failed`, `completed`, or `expired`.
- """
-
- step_details: StepDetails
- """The details of the run step."""
-
- thread_id: str
- """The ID of the [thread](/docs/api-reference/threads) that was run."""
-
- type: Literal["message_creation", "tool_calls"]
- """The type of run step, which can be either `message_creation` or `tool_calls`."""
-
- usage: Optional[Usage] = None
- """Usage statistics related to the run step.
-
- This value will be `null` while the run step's status is `in_progress`.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py
deleted file mode 100644
index 6383fcb3..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["StepListParams"]
-
-
-class StepListParams(TypedDict, total=False):
- thread_id: Required[str]
-
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]]
- """A list of additional fields to include in the response.
-
- Currently the only supported value is
- `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py
deleted file mode 100644
index 93ccb4ca..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ...._models import BaseModel
-from .run_step_object import RunStepObject
-
-__all__ = ["StepListResponse"]
-
-
-class StepListResponse(BaseModel):
- data: List[RunStepObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py
deleted file mode 100644
index ce6bcbfb..00000000
--- a/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["StepRetrieveParams"]
-
-
-class StepRetrieveParams(TypedDict, total=False):
- thread_id: Required[str]
-
- run_id: Required[str]
-
- include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]]
- """A list of additional fields to include in the response.
-
- Currently the only supported value is
- `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
- search result content.
-
- See the
- [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
- for more information.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object.py b/src/digitalocean_genai_sdk/types/threads/truncation_object.py
deleted file mode 100644
index 7c81b3b5..00000000
--- a/src/digitalocean_genai_sdk/types/threads/truncation_object.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["TruncationObject"]
-
-
-class TruncationObject(BaseModel):
- type: Literal["auto", "last_messages"]
- """The truncation strategy to use for the thread.
-
- The default is `auto`. If set to `last_messages`, the thread will be truncated
- to the n most recent messages in the thread. When set to `auto`, messages in the
- middle of the thread will be dropped to fit the context length of the model,
- `max_prompt_tokens`.
- """
-
- last_messages: Optional[int] = None
- """
- The number of most recent messages from the thread when constructing the context
- for the run.
- """
diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py b/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py
deleted file mode 100644
index 98d942fa..00000000
--- a/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["TruncationObjectParam"]
-
-
-class TruncationObjectParam(TypedDict, total=False):
- type: Required[Literal["auto", "last_messages"]]
- """The truncation strategy to use for the thread.
-
- The default is `auto`. If set to `last_messages`, the thread will be truncated
- to the n most recent messages in the thread. When set to `auto`, messages in the
- middle of the thread will be dropped to fit the context length of the model,
- `max_prompt_tokens`.
- """
-
- last_messages: Optional[int]
- """
- The number of most recent messages from the thread when constructing the context
- for the run.
- """
diff --git a/src/digitalocean_genai_sdk/types/transcription_segment.py b/src/digitalocean_genai_sdk/types/transcription_segment.py
deleted file mode 100644
index 2345fa18..00000000
--- a/src/digitalocean_genai_sdk/types/transcription_segment.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-
-__all__ = ["TranscriptionSegment"]
-
-
-class TranscriptionSegment(BaseModel):
- id: int
- """Unique identifier of the segment."""
-
- avg_logprob: float
- """Average logprob of the segment.
-
- If the value is lower than -1, consider the logprobs failed.
- """
-
- compression_ratio: float
- """Compression ratio of the segment.
-
- If the value is greater than 2.4, consider the compression failed.
- """
-
- end: float
- """End time of the segment in seconds."""
-
- no_speech_prob: float
- """Probability of no speech in the segment.
-
- If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this
- segment silent.
- """
-
- seek: int
- """Seek offset of the segment."""
-
- start: float
- """Start time of the segment in seconds."""
-
- temperature: float
- """Temperature parameter used for generating the segment."""
-
- text: str
- """Text content of the segment."""
-
- tokens: List[int]
- """Array of token IDs for the text content."""
diff --git a/src/digitalocean_genai_sdk/types/upload.py b/src/digitalocean_genai_sdk/types/upload.py
deleted file mode 100644
index 06b8a806..00000000
--- a/src/digitalocean_genai_sdk/types/upload.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .openai_file import OpenAIFile
-
-__all__ = ["Upload"]
-
-
-class Upload(BaseModel):
- id: str
- """The Upload unique identifier, which can be referenced in API endpoints."""
-
- bytes: int
- """The intended number of bytes to be uploaded."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the Upload was created."""
-
- expires_at: int
- """The Unix timestamp (in seconds) for when the Upload will expire."""
-
- filename: str
- """The name of the file to be uploaded."""
-
- purpose: str
- """The intended purpose of the file.
-
- [Please refer here](/docs/api-reference/files/object#files/object-purpose) for
- acceptable values.
- """
-
- status: Literal["pending", "completed", "cancelled", "expired"]
- """The status of the Upload."""
-
- file: Optional[OpenAIFile] = None
- """The `File` object represents a document that has been uploaded to OpenAI."""
-
- object: Optional[Literal["upload"]] = None
- """The object type, which is always "upload"."""
diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_params.py b/src/digitalocean_genai_sdk/types/upload_add_part_params.py
deleted file mode 100644
index a0c8b61c..00000000
--- a/src/digitalocean_genai_sdk/types/upload_add_part_params.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["UploadAddPartParams"]
-
-
-class UploadAddPartParams(TypedDict, total=False):
- data: Required[FileTypes]
- """The chunk of bytes for this Part."""
diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_response.py b/src/digitalocean_genai_sdk/types/upload_add_part_response.py
deleted file mode 100644
index fb091f76..00000000
--- a/src/digitalocean_genai_sdk/types/upload_add_part_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["UploadAddPartResponse"]
-
-
-class UploadAddPartResponse(BaseModel):
- id: str
- """The upload Part unique identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the Part was created."""
-
- object: Literal["upload.part"]
- """The object type, which is always `upload.part`."""
-
- upload_id: str
- """The ID of the Upload object that this Part was added to."""
diff --git a/src/digitalocean_genai_sdk/types/upload_complete_params.py b/src/digitalocean_genai_sdk/types/upload_complete_params.py
deleted file mode 100644
index cce568d5..00000000
--- a/src/digitalocean_genai_sdk/types/upload_complete_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Required, TypedDict
-
-__all__ = ["UploadCompleteParams"]
-
-
-class UploadCompleteParams(TypedDict, total=False):
- part_ids: Required[List[str]]
- """The ordered list of Part IDs."""
-
- md5: str
- """
- The optional md5 checksum for the file contents to verify if the bytes uploaded
- matches what you expect.
- """
diff --git a/src/digitalocean_genai_sdk/types/upload_create_params.py b/src/digitalocean_genai_sdk/types/upload_create_params.py
deleted file mode 100644
index eab9a51b..00000000
--- a/src/digitalocean_genai_sdk/types/upload_create_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UploadCreateParams"]
-
-
-class UploadCreateParams(TypedDict, total=False):
- bytes: Required[int]
- """The number of bytes in the file you are uploading."""
-
- filename: Required[str]
- """The name of the file to upload."""
-
- mime_type: Required[str]
- """The MIME type of the file.
-
- This must fall within the supported MIME types for your file purpose. See the
- supported MIME types for assistants and vision.
- """
-
- purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]]
- """The intended purpose of the uploaded file.
-
- See the
- [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
- """
diff --git a/src/digitalocean_genai_sdk/types/usage_response.py b/src/digitalocean_genai_sdk/types/usage_response.py
deleted file mode 100644
index 9f70e7c4..00000000
--- a/src/digitalocean_genai_sdk/types/usage_response.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-
-__all__ = [
- "UsageResponse",
- "Data",
- "DataResult",
- "DataResultUsageCompletionsResult",
- "DataResultUsageEmbeddingsResult",
- "DataResultUsageModerationsResult",
- "DataResultUsageImagesResult",
- "DataResultUsageAudioSpeechesResult",
- "DataResultUsageAudioTranscriptionsResult",
- "DataResultUsageVectorStoresResult",
- "DataResultUsageCodeInterpreterSessionsResult",
- "DataResultCostsResult",
- "DataResultCostsResultAmount",
-]
-
-
-class DataResultUsageCompletionsResult(BaseModel):
- input_tokens: int
- """The aggregated number of text input tokens used, including cached tokens.
-
- For customers subscribe to scale tier, this includes scale tier tokens.
- """
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.completions.result"]
-
- output_tokens: int
- """The aggregated number of text output tokens used.
-
- For customers subscribe to scale tier, this includes scale tier tokens.
- """
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- batch: Optional[bool] = None
- """
- When `group_by=batch`, this field tells whether the grouped usage result is
- batch or not.
- """
-
- input_audio_tokens: Optional[int] = None
- """The aggregated number of audio input tokens used, including cached tokens."""
-
- input_cached_tokens: Optional[int] = None
- """
- The aggregated number of text input tokens that has been cached from previous
- requests. For customers subscribe to scale tier, this includes scale tier
- tokens.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- output_audio_tokens: Optional[int] = None
- """The aggregated number of audio output tokens used."""
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageEmbeddingsResult(BaseModel):
- input_tokens: int
- """The aggregated number of input tokens used."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.embeddings.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageModerationsResult(BaseModel):
- input_tokens: int
- """The aggregated number of input tokens used."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.moderations.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageImagesResult(BaseModel):
- images: int
- """The number of images processed."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.images.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- size: Optional[str] = None
- """
- When `group_by=size`, this field provides the image size of the grouped usage
- result.
- """
-
- source: Optional[str] = None
- """
- When `group_by=source`, this field provides the source of the grouped usage
- result, possible values are `image.generation`, `image.edit`, `image.variation`.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageAudioSpeechesResult(BaseModel):
- characters: int
- """The number of characters processed."""
-
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.audio_speeches.result"]
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageAudioTranscriptionsResult(BaseModel):
- num_model_requests: int
- """The count of requests made to the model."""
-
- object: Literal["organization.usage.audio_transcriptions.result"]
-
- seconds: int
- """The number of seconds processed."""
-
- api_key_id: Optional[str] = None
- """
- When `group_by=api_key_id`, this field provides the API key ID of the grouped
- usage result.
- """
-
- model: Optional[str] = None
- """
- When `group_by=model`, this field provides the model name of the grouped usage
- result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
- user_id: Optional[str] = None
- """
- When `group_by=user_id`, this field provides the user ID of the grouped usage
- result.
- """
-
-
-class DataResultUsageVectorStoresResult(BaseModel):
- object: Literal["organization.usage.vector_stores.result"]
-
- usage_bytes: int
- """The vector stores usage in bytes."""
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
-
-class DataResultUsageCodeInterpreterSessionsResult(BaseModel):
- object: Literal["organization.usage.code_interpreter_sessions.result"]
-
- num_sessions: Optional[int] = None
- """The number of code interpreter sessions."""
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- usage result.
- """
-
-
-class DataResultCostsResultAmount(BaseModel):
- currency: Optional[str] = None
- """Lowercase ISO-4217 currency e.g. "usd" """
-
- value: Optional[float] = None
- """The numeric value of the cost."""
-
-
-class DataResultCostsResult(BaseModel):
- object: Literal["organization.costs.result"]
-
- amount: Optional[DataResultCostsResultAmount] = None
- """The monetary value in its associated currency."""
-
- line_item: Optional[str] = None
- """
- When `group_by=line_item`, this field provides the line item of the grouped
- costs result.
- """
-
- project_id: Optional[str] = None
- """
- When `group_by=project_id`, this field provides the project ID of the grouped
- costs result.
- """
-
-
-DataResult: TypeAlias = Union[
- DataResultUsageCompletionsResult,
- DataResultUsageEmbeddingsResult,
- DataResultUsageModerationsResult,
- DataResultUsageImagesResult,
- DataResultUsageAudioSpeechesResult,
- DataResultUsageAudioTranscriptionsResult,
- DataResultUsageVectorStoresResult,
- DataResultUsageCodeInterpreterSessionsResult,
- DataResultCostsResult,
-]
-
-
-class Data(BaseModel):
- end_time: int
-
- object: Literal["bucket"]
-
- result: List[DataResult]
-
- start_time: int
-
-
-class UsageResponse(BaseModel):
- data: List[Data]
-
- has_more: bool
-
- next_page: str
-
- object: Literal["page"]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_create_params.py b/src/digitalocean_genai_sdk/types/vector_store_create_params.py
deleted file mode 100644
index 48118e80..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_create_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import TypeAlias, TypedDict
-
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam
-from .static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam
-
-__all__ = ["VectorStoreCreateParams", "ChunkingStrategy"]
-
-
-class VectorStoreCreateParams(TypedDict, total=False):
- chunking_strategy: ChunkingStrategy
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy. Only applicable if `file_ids` is
- non-empty.
- """
-
- expires_after: VectorStoreExpirationAfterParam
- """The expiration policy for a vector store."""
-
- file_ids: List[str]
- """A list of [File](/docs/api-reference/files) IDs that the vector store should
- use.
-
- Useful for tools like `file_search` that can access files.
- """
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: str
- """The name of the vector store."""
-
-
-ChunkingStrategy: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_delete_response.py b/src/digitalocean_genai_sdk/types/vector_store_delete_response.py
deleted file mode 100644
index 17d3ee21..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["VectorStoreDeleteResponse"]
-
-
-class VectorStoreDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["vector_store.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py
deleted file mode 100644
index 1d417d52..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["VectorStoreExpirationAfter"]
-
-
-class VectorStoreExpirationAfter(BaseModel):
- anchor: Literal["last_active_at"]
- """Anchor timestamp after which the expiration policy applies.
-
- Supported anchors: `last_active_at`.
- """
-
- days: int
- """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py
deleted file mode 100644
index 29a008c7..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["VectorStoreExpirationAfterParam"]
-
-
-class VectorStoreExpirationAfterParam(TypedDict, total=False):
- anchor: Required[Literal["last_active_at"]]
- """Anchor timestamp after which the expiration policy applies.
-
- Supported anchors: `last_active_at`.
- """
-
- days: Required[int]
- """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_params.py b/src/digitalocean_genai_sdk/types/vector_store_list_params.py
deleted file mode 100644
index e26ff90a..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_list_params.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["VectorStoreListParams"]
-
-
-class VectorStoreListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_response.py b/src/digitalocean_genai_sdk/types/vector_store_list_response.py
deleted file mode 100644
index 2dc455ea..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .vector_store_object import VectorStoreObject
-
-__all__ = ["VectorStoreListResponse"]
-
-
-class VectorStoreListResponse(BaseModel):
- data: List[VectorStoreObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/vector_store_object.py b/src/digitalocean_genai_sdk/types/vector_store_object.py
deleted file mode 100644
index ebd52a31..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_object.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .vector_store_expiration_after import VectorStoreExpirationAfter
-
-__all__ = ["VectorStoreObject", "FileCounts"]
-
-
-class FileCounts(BaseModel):
- cancelled: int
- """The number of files that were cancelled."""
-
- completed: int
- """The number of files that have been successfully processed."""
-
- failed: int
- """The number of files that have failed to process."""
-
- in_progress: int
- """The number of files that are currently being processed."""
-
- total: int
- """The total number of files."""
-
-
-class VectorStoreObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the vector store was created."""
-
- file_counts: FileCounts
-
- last_active_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the vector store was last active."""
-
- metadata: Optional[Dict[str, str]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: str
- """The name of the vector store."""
-
- object: Literal["vector_store"]
- """The object type, which is always `vector_store`."""
-
- status: Literal["expired", "in_progress", "completed"]
- """
- The status of the vector store, which can be either `expired`, `in_progress`, or
- `completed`. A status of `completed` indicates that the vector store is ready
- for use.
- """
-
- usage_bytes: int
- """The total number of bytes used by the files in the vector store."""
-
- expires_after: Optional[VectorStoreExpirationAfter] = None
- """The expiration policy for a vector store."""
-
- expires_at: Optional[int] = None
- """The Unix timestamp (in seconds) for when the vector store will expire."""
diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_params.py b/src/digitalocean_genai_sdk/types/vector_store_search_params.py
deleted file mode 100644
index 5b90b063..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_search_params.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .compound_filter_param import CompoundFilterParam
-from .comparison_filter_param import ComparisonFilterParam
-
-__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"]
-
-
-class VectorStoreSearchParams(TypedDict, total=False):
- query: Required[Union[str, List[str]]]
- """A query string for a search"""
-
- filters: Filters
- """A filter to apply based on file attributes."""
-
- max_num_results: int
- """The maximum number of results to return.
-
- This number should be between 1 and 50 inclusive.
- """
-
- ranking_options: RankingOptions
- """Ranking options for search."""
-
- rewrite_query: bool
- """Whether to rewrite the natural language query for vector search."""
-
-
-Filters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam]
-
-
-class RankingOptions(TypedDict, total=False):
- ranker: Literal["auto", "default-2024-11-15"]
-
- score_threshold: float
diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_response.py b/src/digitalocean_genai_sdk/types/vector_store_search_response.py
deleted file mode 100644
index b303f7ea..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_search_response.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["VectorStoreSearchResponse", "Data", "DataContent"]
-
-
-class DataContent(BaseModel):
- text: str
- """The text content returned from search."""
-
- type: Literal["text"]
- """The type of content."""
-
-
-class Data(BaseModel):
- attributes: Optional[Dict[str, Union[str, float, bool]]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- content: List[DataContent]
- """Content chunks from the file."""
-
- file_id: str
- """The ID of the vector store file."""
-
- filename: str
- """The name of the vector store file."""
-
- score: float
- """The similarity score for the result."""
-
-
-class VectorStoreSearchResponse(BaseModel):
- data: List[Data]
- """The list of search result items."""
-
- has_more: bool
- """Indicates if there are more results to fetch."""
-
- next_page: Optional[str] = None
- """The token for the next page, if any."""
-
- object: Literal["vector_store.search_results.page"]
- """The object type, which is always `vector_store.search_results.page`"""
-
- search_query: List[str]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_update_params.py b/src/digitalocean_genai_sdk/types/vector_store_update_params.py
deleted file mode 100644
index a9400cf2..00000000
--- a/src/digitalocean_genai_sdk/types/vector_store_update_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import TypedDict
-
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-
-__all__ = ["VectorStoreUpdateParams"]
-
-
-class VectorStoreUpdateParams(TypedDict, total=False):
- expires_after: Optional[VectorStoreExpirationAfterParam]
- """The expiration policy for a vector store."""
-
- metadata: Optional[Dict[str, str]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
-
- name: Optional[str]
- """The name of the vector store."""
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py
deleted file mode 100644
index 5018f06d..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .file_list_params import FileListParams as FileListParams
-from .file_create_params import FileCreateParams as FileCreateParams
-from .file_update_params import FileUpdateParams as FileUpdateParams
-from .file_delete_response import FileDeleteResponse as FileDeleteResponse
-from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams
-from .vector_store_file_object import VectorStoreFileObject as VectorStoreFileObject
-from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams
-from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse
-from .vector_store_file_batch_object import VectorStoreFileBatchObject as VectorStoreFileBatchObject
-from .chunking_strategy_request_param import ChunkingStrategyRequestParam as ChunkingStrategyRequestParam
-from .list_vector_store_files_response import ListVectorStoreFilesResponse as ListVectorStoreFilesResponse
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py
deleted file mode 100644
index 1dab9558..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import TypeAlias
-
-from ..auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam
-from ..static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam
-
-__all__ = ["ChunkingStrategyRequestParam"]
-
-ChunkingStrategyRequestParam: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam]
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py
deleted file mode 100644
index 2e2bf227..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Required, TypedDict
-
-from .chunking_strategy_request_param import ChunkingStrategyRequestParam
-
-__all__ = ["FileBatchCreateParams"]
-
-
-class FileBatchCreateParams(TypedDict, total=False):
- file_ids: Required[List[str]]
- """A list of [File](/docs/api-reference/files) IDs that the vector store should
- use.
-
- Useful for tools like `file_search` that can access files.
- """
-
- attributes: Optional[Dict[str, Union[str, float, bool]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- chunking_strategy: ChunkingStrategyRequestParam
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py
deleted file mode 100644
index 2a0a6c6a..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FileBatchListFilesParams"]
-
-
-class FileBatchListFilesParams(TypedDict, total=False):
- vector_store_id: Required[str]
-
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- filter: Literal["in_progress", "completed", "failed", "cancelled"]
- """Filter by file status.
-
- One of `in_progress`, `completed`, `failed`, `cancelled`.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py
deleted file mode 100644
index 6183f4e7..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Required, TypedDict
-
-from .chunking_strategy_request_param import ChunkingStrategyRequestParam
-
-__all__ = ["FileCreateParams"]
-
-
-class FileCreateParams(TypedDict, total=False):
- file_id: Required[str]
- """A [File](/docs/api-reference/files) ID that the vector store should use.
-
- Useful for tools like `file_search` that can access files.
- """
-
- attributes: Optional[Dict[str, Union[str, float, bool]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- chunking_strategy: ChunkingStrategyRequestParam
- """The chunking strategy used to chunk the file(s).
-
- If not set, will use the `auto` strategy.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py
deleted file mode 100644
index 24fbe570..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FileDeleteResponse"]
-
-
-class FileDeleteResponse(BaseModel):
- id: str
-
- deleted: bool
-
- object: Literal["vector_store.file.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py
deleted file mode 100644
index 867b5fb3..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["FileListParams"]
-
-
-class FileListParams(TypedDict, total=False):
- after: str
- """A cursor for use in pagination.
-
- `after` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
- subsequent call can include after=obj_foo in order to fetch the next page of the
- list.
- """
-
- before: str
- """A cursor for use in pagination.
-
- `before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, starting with obj_foo, your
- subsequent call can include before=obj_foo in order to fetch the previous page
- of the list.
- """
-
- filter: Literal["in_progress", "completed", "failed", "cancelled"]
- """Filter by file status.
-
- One of `in_progress`, `completed`, `failed`, `cancelled`.
- """
-
- limit: int
- """A limit on the number of objects to be returned.
-
- Limit can range between 1 and 100, and the default is 20.
- """
-
- order: Literal["asc", "desc"]
- """Sort order by the `created_at` timestamp of the objects.
-
- `asc` for ascending order and `desc` for descending order.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py
deleted file mode 100644
index e4f0966c..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FileRetrieveContentResponse", "Data"]
-
-
-class Data(BaseModel):
- text: Optional[str] = None
- """The text content"""
-
- type: Optional[str] = None
- """The content type (currently only `"text"`)"""
-
-
-class FileRetrieveContentResponse(BaseModel):
- data: List[Data]
- """Parsed content of the file."""
-
- has_more: bool
- """Indicates if there are more content pages to fetch."""
-
- next_page: Optional[str] = None
- """The token for the next page, if any."""
-
- object: Literal["vector_store.file_content.page"]
- """The object type, which is always `vector_store.file_content.page`"""
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py
deleted file mode 100644
index ebf540d0..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["FileUpdateParams"]
-
-
-class FileUpdateParams(TypedDict, total=False):
- vector_store_id: Required[str]
-
- attributes: Required[Optional[Dict[str, Union[str, float, bool]]]]
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py b/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py
deleted file mode 100644
index dc997962..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .vector_store_file_object import VectorStoreFileObject
-
-__all__ = ["ListVectorStoreFilesResponse"]
-
-
-class ListVectorStoreFilesResponse(BaseModel):
- data: List[VectorStoreFileObject]
-
- first_id: str
-
- has_more: bool
-
- last_id: str
-
- object: str
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py
deleted file mode 100644
index 3d5aa1bd..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["VectorStoreFileBatchObject", "FileCounts"]
-
-
-class FileCounts(BaseModel):
- cancelled: int
- """The number of files that where cancelled."""
-
- completed: int
- """The number of files that have been processed."""
-
- failed: int
- """The number of files that have failed to process."""
-
- in_progress: int
- """The number of files that are currently being processed."""
-
- total: int
- """The total number of files."""
-
-
-class VectorStoreFileBatchObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """
- The Unix timestamp (in seconds) for when the vector store files batch was
- created.
- """
-
- file_counts: FileCounts
-
- object: Literal["vector_store.files_batch"]
- """The object type, which is always `vector_store.file_batch`."""
-
- status: Literal["in_progress", "completed", "cancelled", "failed"]
- """
- The status of the vector store files batch, which can be either `in_progress`,
- `completed`, `cancelled` or `failed`.
- """
-
- vector_store_id: str
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) that the
- [File](/docs/api-reference/files) is attached to.
- """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
deleted file mode 100644
index e28e28a6..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from ..static_chunking_strategy import StaticChunkingStrategy
-
-__all__ = [
- "VectorStoreFileObject",
- "LastError",
- "ChunkingStrategy",
- "ChunkingStrategyStaticChunkingStrategyResponseParam",
- "ChunkingStrategyOtherChunkingStrategyResponseParam",
-]
-
-
-class LastError(BaseModel):
- code: Literal["server_error", "unsupported_file", "invalid_file"]
- """One of `server_error` or `rate_limit_exceeded`."""
-
- message: str
- """A human-readable description of the error."""
-
-
-class ChunkingStrategyStaticChunkingStrategyResponseParam(BaseModel):
- static: StaticChunkingStrategy
-
- type: Literal["static"]
- """Always `static`."""
-
-
-class ChunkingStrategyOtherChunkingStrategyResponseParam(BaseModel):
- type: Literal["other"]
- """Always `other`."""
-
-
-ChunkingStrategy: TypeAlias = Union[
- ChunkingStrategyStaticChunkingStrategyResponseParam, ChunkingStrategyOtherChunkingStrategyResponseParam
-]
-
-
-class VectorStoreFileObject(BaseModel):
- id: str
- """The identifier, which can be referenced in API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the vector store file was created."""
-
- last_error: Optional[LastError] = None
- """The last error associated with this vector store file.
-
- Will be `null` if there are no errors.
- """
-
- object: Literal["vector_store.file"]
- """The object type, which is always `vector_store.file`."""
-
- status: Literal["in_progress", "completed", "cancelled", "failed"]
- """
- The status of the vector store file, which can be either `in_progress`,
- `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
- vector store file is ready for use.
- """
-
- usage_bytes: int
- """The total vector store usage in bytes.
-
- Note that this may be different from the original file size.
- """
-
- vector_store_id: str
- """
- The ID of the [vector store](/docs/api-reference/vector-stores/object) that the
- [File](/docs/api-reference/files) is attached to.
- """
-
- attributes: Optional[Dict[str, Union[str, float, bool]]] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard. Keys are
- strings with a maximum length of 64 characters. Values are strings with a
- maximum length of 512 characters, booleans, or numbers.
- """
-
- chunking_strategy: Optional[ChunkingStrategy] = None
- """The strategy used to chunk the file."""
diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared.py b/src/digitalocean_genai_sdk/types/voice_ids_shared.py
deleted file mode 100644
index 5679bda3..00000000
--- a/src/digitalocean_genai_sdk/types/voice_ids_shared.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["VoiceIDsShared"]
-
-VoiceIDsShared: TypeAlias = Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-]
diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py b/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py
deleted file mode 100644
index ccbd853d..00000000
--- a/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["VoiceIDsSharedParam"]
-
-VoiceIDsSharedParam: TypeAlias = Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-]
diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call.py b/src/digitalocean_genai_sdk/types/web_search_tool_call.py
deleted file mode 100644
index 1b57ab87..00000000
--- a/src/digitalocean_genai_sdk/types/web_search_tool_call.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["WebSearchToolCall"]
-
-
-class WebSearchToolCall(BaseModel):
- id: str
- """The unique ID of the web search tool call."""
-
- status: Literal["in_progress", "searching", "completed", "failed"]
- """The status of the web search tool call."""
-
- type: Literal["web_search_call"]
- """The type of the web search tool call. Always `web_search_call`."""
diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py
deleted file mode 100644
index 39e5c502..00000000
--- a/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["WebSearchToolCallParam"]
-
-
-class WebSearchToolCallParam(TypedDict, total=False):
- id: Required[str]
- """The unique ID of the web search tool call."""
-
- status: Required[Literal["in_progress", "searching", "completed", "failed"]]
- """The status of the web search tool call."""
-
- type: Required[Literal["web_search_call"]]
- """The type of the web search tool call. Always `web_search_call`."""
diff --git a/src/gradient/__init__.py b/src/gradient/__init__.py
new file mode 100644
index 00000000..c5733e7e
--- /dev/null
+++ b/src/gradient/__init__.py
@@ -0,0 +1,112 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import typing as _t
+
+from . import types
+from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes, omit, not_given
+from ._utils import file_from_path
+from ._client import (
+ Client,
+ Stream,
+ Timeout,
+ Gradient,
+ Transport,
+ AsyncClient,
+ AsyncStream,
+ AsyncGradient,
+ RequestOptions,
+)
+from ._models import BaseModel
+from ._version import __title__, __version__
+from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse
+from ._constants import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES, DEFAULT_CONNECTION_LIMITS
+from ._exceptions import (
+ APIError,
+ ConflictError,
+ GradientError,
+ NotFoundError,
+ APIStatusError,
+ RateLimitError,
+ APITimeoutError,
+ BadRequestError,
+ IndexingJobError,
+ APIConnectionError,
+ AuthenticationError,
+ InternalServerError,
+ AgentDeploymentError,
+ PermissionDeniedError,
+ IndexingJobTimeoutError,
+ UnprocessableEntityError,
+ APIResponseValidationError,
+ AgentDeploymentTimeoutError,
+)
+from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient
+from ._utils._logs import setup_logging as _setup_logging
+
+__all__ = [
+ "types",
+ "__version__",
+ "__title__",
+ "NoneType",
+ "Transport",
+ "ProxiesTypes",
+ "NotGiven",
+ "NOT_GIVEN",
+ "not_given",
+ "Omit",
+ "omit",
+ "GradientError",
+ "APIError",
+ "APIStatusError",
+ "APITimeoutError",
+ "APIConnectionError",
+ "APIResponseValidationError",
+ "AgentDeploymentError",
+ "AgentDeploymentTimeoutError",
+ "BadRequestError",
+ "AuthenticationError",
+ "PermissionDeniedError",
+ "NotFoundError",
+ "ConflictError",
+ "UnprocessableEntityError",
+ "RateLimitError",
+ "InternalServerError",
+ "IndexingJobError",
+ "IndexingJobTimeoutError",
+ "AgentDeploymentError",
+ "AgentDeploymentTimeoutError",
+ "Timeout",
+ "RequestOptions",
+ "Client",
+ "AsyncClient",
+ "Stream",
+ "AsyncStream",
+ "Gradient",
+ "AsyncGradient",
+ "file_from_path",
+ "BaseModel",
+ "DEFAULT_TIMEOUT",
+ "DEFAULT_MAX_RETRIES",
+ "DEFAULT_CONNECTION_LIMITS",
+ "DefaultHttpxClient",
+ "DefaultAsyncHttpxClient",
+ "DefaultAioHttpClient",
+]
+
+if not _t.TYPE_CHECKING:
+ from ._utils._resources_proxy import resources as resources
+
+_setup_logging()
+
+# Update the __module__ attribute for exported symbols so that
+# error messages point to this module instead of the module
+# it was originally defined in, e.g.
+# gradient._exceptions.NotFoundError -> gradient.NotFoundError
+__locals = locals()
+for __name in __all__:
+ if not __name.startswith("__"):
+ try:
+ __locals[__name].__module__ = "gradient"
+ except (TypeError, AttributeError):
+ # Some of our exported symbols are builtins which we can't set attributes for.
+ pass
diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/gradient/_base_client.py
similarity index 84%
rename from src/digitalocean_genai_sdk/_base_client.py
rename to src/gradient/_base_client.py
index 9f58b2f9..ca3db359 100644
--- a/src/digitalocean_genai_sdk/_base_client.py
+++ b/src/gradient/_base_client.py
@@ -9,6 +9,7 @@
import inspect
import logging
import platform
+import warnings
import email.utils
from types import TracebackType
from random import random
@@ -42,7 +43,6 @@
from ._qs import Querystring
from ._files import to_httpx_files, async_to_httpx_files
from ._types import (
- NOT_GIVEN,
Body,
Omit,
Query,
@@ -52,14 +52,17 @@
ResponseT,
AnyMapping,
PostParser,
+ BinaryTypes,
RequestFiles,
HttpxSendArgs,
RequestOptions,
+ AsyncBinaryTypes,
HttpxRequestFiles,
ModelBuilderProtocol,
+ not_given,
)
from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
-from ._compat import PYDANTIC_V2, model_copy, model_dump
+from ._compat import PYDANTIC_V1, model_copy
from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
from ._response import (
APIResponse,
@@ -83,6 +86,7 @@
APIConnectionError,
APIResponseValidationError,
)
+from ._utils._json import openapi_dumps
log: logging.Logger = logging.getLogger(__name__)
@@ -145,9 +149,9 @@ def __init__(
def __init__(
self,
*,
- url: URL | NotGiven = NOT_GIVEN,
- json: Body | NotGiven = NOT_GIVEN,
- params: Query | NotGiven = NOT_GIVEN,
+ url: URL | NotGiven = not_given,
+ json: Body | NotGiven = not_given,
+ params: Query | NotGiven = not_given,
) -> None:
self.url = url
self.json = json
@@ -232,7 +236,7 @@ def _set_private_attributes(
model: Type[_T],
options: FinalRequestOptions,
) -> None:
- if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+ if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
self.__pydantic_private__ = {}
self._model = model
@@ -320,7 +324,7 @@ def _set_private_attributes(
client: AsyncAPIClient,
options: FinalRequestOptions,
) -> None:
- if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+ if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
self.__pydantic_private__ = {}
self._model = model
@@ -376,6 +380,8 @@ def __init__(
timeout: float | Timeout | None = DEFAULT_TIMEOUT,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
+ user_agent_package: str | None = None,
+ user_agent_version: str | None = None,
) -> None:
self._version = version
self._base_url = self._enforce_trailing_slash(URL(base_url))
@@ -386,10 +392,12 @@ def __init__(
self._strict_response_validation = _strict_response_validation
self._idempotency_header = None
self._platform: Platform | None = None
+ self._user_agent_package = user_agent_package
+ self._user_agent_version = user_agent_version
if max_retries is None: # pyright: ignore[reportUnnecessaryComparison]
raise TypeError(
- "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `digitalocean_genai_sdk.DEFAULT_MAX_RETRIES`"
+ "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradient.DEFAULT_MAX_RETRIES`"
)
def _enforce_trailing_slash(self, url: URL) -> URL:
@@ -477,8 +485,19 @@ def _build_request(
retries_taken: int = 0,
) -> httpx.Request:
if log.isEnabledFor(logging.DEBUG):
- log.debug("Request options: %s", model_dump(options, exclude_unset=True))
-
+ log.debug(
+ "Request options",
+ # model_dump(
+ # options,
+ # exclude_unset=True,
+ # # Pydantic v1 can't dump every type we support in content, so we exclude it for now.
+ # exclude={
+ # "content",
+ # }
+ # if PYDANTIC_V1
+ # else {},
+ # ),
+ )
kwargs: dict[str, Any] = {}
json_data = options.json_data
@@ -529,19 +548,37 @@ def _build_request(
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
+ is_body_allowed = options.method.lower() != "get"
+
+ if is_body_allowed:
+ if options.content is not None and json_data is not None:
+ raise TypeError("Passing both `content` and `json_data` is not supported")
+ if options.content is not None and files is not None:
+ raise TypeError("Passing both `content` and `files` is not supported")
+ if options.content is not None:
+ kwargs["content"] = options.content
+ elif isinstance(json_data, bytes):
+ kwargs["content"] = json_data
+ elif not files:
+ # Don't set content when JSON is sent as multipart/form-data,
+ # since httpx's content param overrides other body arguments
+ kwargs["content"] = openapi_dumps(json_data) if is_given(json_data) and json_data is not None else None
+ kwargs["files"] = files
+ else:
+ headers.pop("Content-Type", None)
+ kwargs.pop("data", None)
+
# TODO: report this error to httpx
return self._client.build_request( # pyright: ignore[reportUnknownMemberType]
headers=headers,
- timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout,
+ timeout=(self.timeout if isinstance(options.timeout, NotGiven) else options.timeout),
method=options.method,
url=prepared_url,
# the `Query` type that we use is incompatible with qs'
# `Params` type as it needs to be typed as `Mapping[str, object]`
# so that passing a `TypedDict` doesn't cause an error.
# https://github.com/microsoft/pyright/issues/3526#event-6715453066
- params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
- json=json_data if is_given(json_data) else None,
- files=files,
+ params=(self.qs.stringify(cast(Mapping[str, Any], params)) if params else None),
**kwargs,
)
@@ -585,7 +622,7 @@ def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalReques
# we internally support defining a temporary header to override the
# default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response`
# see _response.py for implementation details
- override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN)
+ override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, not_given)
if is_given(override_cast_to):
options.headers = headers
return cast(Type[ResponseT], override_cast_to)
@@ -661,7 +698,10 @@ def _validate_headers(
@property
def user_agent(self) -> str:
- return f"{self.__class__.__name__}/Python {self._version}"
+ # Format: "Gradient/package/version"
+ package = self._user_agent_package or "Python"
+ version = self._user_agent_version if self._user_agent_package and self._user_agent_version else self._version
+ return f"{self.__class__.__name__}/{package}/{version}"
@property
def base_url(self) -> URL:
@@ -815,11 +855,13 @@ def __init__(
version: str,
base_url: str | URL,
max_retries: int = DEFAULT_MAX_RETRIES,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
_strict_response_validation: bool,
+ user_agent_package: str | None = None,
+ user_agent_version: str | None = None,
) -> None:
if not is_given(timeout):
# if the user passed in a custom http client with a non-default
@@ -848,6 +890,8 @@ def __init__(
custom_query=custom_query,
custom_headers=custom_headers,
_strict_response_validation=_strict_response_validation,
+ user_agent_package=user_agent_package,
+ user_agent_version=user_agent_version,
)
self._client = http_client or SyncHttpxClientWrapper(
base_url=base_url,
@@ -960,6 +1004,9 @@ def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
+
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
@@ -1043,7 +1090,12 @@ def request(
)
def _sleep_for_retry(
- self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ self,
+ *,
+ retries_taken: int,
+ max_retries: int,
+ options: FinalRequestOptions,
+ response: httpx.Response | None,
) -> None:
remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
@@ -1068,7 +1120,14 @@ def _process_response(
) -> ResponseT:
origin = get_origin(cast_to) or cast_to
- if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+ if (
+ inspect.isclass(origin)
+ and issubclass(origin, BaseAPIResponse)
+ # we only want to actually return the custom BaseAPIResponse class if we're
+ # returning the raw response, or if we're not streaming SSE, as if we're streaming
+ # SSE then `cast_to` doesn't actively reflect the type we need to parse into
+ and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
+ ):
if not issubclass(origin, APIResponse):
raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
@@ -1174,6 +1233,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[False] = False,
@@ -1186,6 +1246,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[True],
@@ -1199,6 +1260,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: bool,
@@ -1211,13 +1273,25 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="post", url=path, json_data=body, files=to_httpx_files(files), **options
+ method="post", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
)
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
@@ -1227,9 +1301,24 @@ def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
+ files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(
+ method="patch", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
+ )
return self.request(cast_to, opts)
def put(
@@ -1238,11 +1327,23 @@ def put(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="put", url=path, json_data=body, files=to_httpx_files(files), **options
+ method="put", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
)
return self.request(cast_to, opts)
@@ -1252,9 +1353,19 @@ def delete(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options)
return self.request(cast_to, opts)
def get_api_list(
@@ -1279,6 +1390,25 @@ def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
+try:
+ import httpx_aiohttp
+except ImportError:
+
+ class _DefaultAioHttpClient(httpx.AsyncClient):
+ def __init__(self, **_kwargs: Any) -> None:
+ raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra")
+
+else:
+
+ class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore
+ def __init__(self, **kwargs: Any) -> None:
+ kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
+ kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
+ kwargs.setdefault("follow_redirects", True)
+
+ super().__init__(**kwargs)
+
+
if TYPE_CHECKING:
DefaultAsyncHttpxClient = httpx.AsyncClient
"""An alias to `httpx.AsyncClient` that provides the same defaults that this SDK
@@ -1287,8 +1417,12 @@ def __init__(self, **kwargs: Any) -> None:
This is useful because overriding the `http_client` with your own instance of
`httpx.AsyncClient` will result in httpx's defaults being used, not ours.
"""
+
+ DefaultAioHttpClient = httpx.AsyncClient
+ """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`."""
else:
DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient
+ DefaultAioHttpClient = _DefaultAioHttpClient
class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient):
@@ -1314,10 +1448,12 @@ def __init__(
base_url: str | URL,
_strict_response_validation: bool,
max_retries: int = DEFAULT_MAX_RETRIES,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.AsyncClient | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
+ user_agent_package: str | None = None,
+ user_agent_version: str | None = None,
) -> None:
if not is_given(timeout):
# if the user passed in a custom http client with a non-default
@@ -1346,6 +1482,8 @@ def __init__(
custom_query=custom_query,
custom_headers=custom_headers,
_strict_response_validation=_strict_response_validation,
+ user_agent_package=user_agent_package,
+ user_agent_version=user_agent_version,
)
self._client = http_client or AsyncHttpxClientWrapper(
base_url=base_url,
@@ -1460,6 +1598,9 @@ async def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
+
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
@@ -1543,7 +1684,12 @@ async def request(
)
async def _sleep_for_retry(
- self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ self,
+ *,
+ retries_taken: int,
+ max_retries: int,
+ options: FinalRequestOptions,
+ response: httpx.Response | None,
) -> None:
remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
@@ -1568,7 +1714,14 @@ async def _process_response(
) -> ResponseT:
origin = get_origin(cast_to) or cast_to
- if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+ if (
+ inspect.isclass(origin)
+ and issubclass(origin, BaseAPIResponse)
+ # we only want to actually return the custom BaseAPIResponse class if we're
+ # returning the raw response, or if we're not streaming SSE, as if we're streaming
+ # SSE then `cast_to` doesn't actively reflect the type we need to parse into
+ and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
+ ):
if not issubclass(origin, AsyncAPIResponse):
raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")
@@ -1662,6 +1815,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[False] = False,
@@ -1674,6 +1828,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[True],
@@ -1687,6 +1842,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: bool,
@@ -1699,13 +1855,25 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ method="post", url=path, json_data=body, content=content, files=await async_to_httpx_files(files), **options
)
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
@@ -1715,9 +1883,29 @@ async def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
+ files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(
+ method="patch",
+ url=path,
+ json_data=body,
+ content=content,
+ files=await async_to_httpx_files(files),
+ **options,
+ )
return await self.request(cast_to, opts)
async def put(
@@ -1726,11 +1914,23 @@ async def put(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ method="put", url=path, json_data=body, content=content, files=await async_to_httpx_files(files), **options
)
return await self.request(cast_to, opts)
@@ -1740,9 +1940,19 @@ async def delete(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options)
return await self.request(cast_to, opts)
def get_api_list(
@@ -1766,8 +1976,8 @@ def make_request_options(
extra_query: Query | None = None,
extra_body: Body | None = None,
idempotency_key: str | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- post_parser: PostParser | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ post_parser: PostParser | NotGiven = not_given,
) -> RequestOptions:
"""Create a dict of type RequestOptions without keys of NotGiven values."""
options: RequestOptions = {}
diff --git a/src/gradient/_client.py b/src/gradient/_client.py
new file mode 100644
index 00000000..30e58802
--- /dev/null
+++ b/src/gradient/_client.py
@@ -0,0 +1,1507 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Any, Mapping
+from typing_extensions import Self, override
+
+import httpx
+
+from . import _exceptions
+from ._qs import Querystring
+from ._types import (
+ Omit,
+ Headers,
+ Timeout,
+ NotGiven,
+ Transport,
+ ProxiesTypes,
+ RequestOptions,
+ not_given,
+)
+from ._utils import is_given, get_async_library
+from ._compat import cached_property
+from ._version import __version__
+from ._streaming import Stream as Stream, AsyncStream as AsyncStream
+from ._exceptions import APIStatusError
+from ._base_client import (
+ DEFAULT_MAX_RETRIES,
+ SyncAPIClient,
+ AsyncAPIClient,
+)
+
+if TYPE_CHECKING:
+ from .resources import (
+ nfs,
+ apps,
+ chat,
+ agents,
+ images,
+ models,
+ billing,
+ regions,
+ retrieve,
+ databases,
+ inference,
+ responses,
+ gpu_droplets,
+ knowledge_bases,
+ )
+ from .resources.images import ImagesResource, AsyncImagesResource
+ from .resources.billing import BillingResource, AsyncBillingResource
+ from .resources.nfs.nfs import NfsResource, AsyncNfsResource
+ from .resources.regions import RegionsResource, AsyncRegionsResource
+ from .resources.retrieve import RetrieveResource, AsyncRetrieveResource
+ from .resources.apps.apps import AppsResource, AsyncAppsResource
+ from .resources.chat.chat import ChatResource, AsyncChatResource
+ from .resources.responses import ResponsesResource, AsyncResponsesResource
+ from .resources.gpu_droplets import GPUDropletsResource, AsyncGPUDropletsResource
+ from .resources.agents.agents import AgentsResource, AsyncAgentsResource
+ from .resources.models.models import ModelsResource, AsyncModelsResource
+ from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource
+ from .resources.inference.inference import InferenceResource, AsyncInferenceResource
+ from .resources.knowledge_bases.knowledge_bases import (
+ KnowledgeBasesResource,
+ AsyncKnowledgeBasesResource,
+ )
+
+__all__ = [
+ "Timeout",
+ "Transport",
+ "ProxiesTypes",
+ "RequestOptions",
+ "Gradient",
+ "AsyncGradient",
+ "Client",
+ "AsyncClient",
+]
+
+
+class Gradient(SyncAPIClient):
+ # client options
+ access_token: str | None
+ model_access_key: str | None
+ agent_access_key: str | None
+ _agent_endpoint: str | None
+ inference_endpoint: str | None
+ kbass_endpoint: str | None
+
+ def __init__(
+ self,
+ *,
+ access_token: str | None = None,
+ model_access_key: str | None = None,
+ agent_access_key: str | None = None,
+ agent_endpoint: str | None = None,
+ inference_endpoint: str | None = None,
+ kbass_endpoint: str | None = None,
+ base_url: str | httpx.URL | None = None,
+ timeout: float | Timeout | None | NotGiven = not_given,
+ max_retries: int = DEFAULT_MAX_RETRIES,
+ default_headers: Mapping[str, str] | None = None,
+ default_query: Mapping[str, object] | None = None,
+ # Configure a custom httpx client.
+ # We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
+ # See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
+ http_client: httpx.Client | None = None,
+ # Enable or disable schema validation for data returned by the API.
+ # When enabled an error APIResponseValidationError is raised
+ # if the API responds with invalid data for the expected schema.
+ #
+ # This parameter may be removed or changed in the future.
+ # If you rely on this feature, please open a GitHub issue
+ # outlining your use-case to help us decide if it should be
+ # part of our public interface in the future.
+ _strict_response_validation: bool = False,
+ # User agent tracking parameters
+ user_agent_package: str | None = None,
+ user_agent_version: str | None = None,
+ ) -> None:
+ """Construct a new synchronous Gradient client instance.
+
+ This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+ - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN`
+ - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY`
+ - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY`
+ - `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT`
+ - `inference_endpoint` from `GRADIENT_INFERENCE_ENDPOINT`
+ - `kbass_endpoint` from `GRADIENT_KBASS_ENDPOINT`
+ """
+ if access_token is None:
+ access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN")
+ self.access_token = access_token
+
+ if model_access_key is None:
+ model_access_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY")
+ self.model_access_key = model_access_key
+
+ if agent_access_key is None:
+ agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY")
+ self.agent_access_key = agent_access_key
+
+ if agent_endpoint is None:
+ agent_endpoint = os.environ.get("GRADIENT_AGENT_ENDPOINT")
+ self._agent_endpoint = agent_endpoint
+
+ if inference_endpoint is None:
+ inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "https://inference.do-ai.run"
+ self.inference_endpoint = inference_endpoint
+
+ if kbass_endpoint is None:
+ kbass_endpoint = os.environ.get("GRADIENT_KBASS_ENDPOINT") or "kbaas.do-ai.run"
+ self.kbass_endpoint = kbass_endpoint
+
+ if base_url is None:
+ base_url = os.environ.get("GRADIENT_BASE_URL")
+ self._base_url_overridden = base_url is not None
+ if base_url is None:
+ base_url = f"https://api.digitalocean.com"
+
+ super().__init__(
+ version=__version__,
+ base_url=base_url,
+ max_retries=max_retries,
+ timeout=timeout,
+ http_client=http_client,
+ custom_headers=default_headers,
+ custom_query=default_query,
+ _strict_response_validation=_strict_response_validation,
+ user_agent_package=user_agent_package,
+ user_agent_version=user_agent_version,
+ )
+
+ self._default_stream_cls = Stream
+
+ @cached_property
+ def agent_endpoint(self) -> str:
+ """
+ Returns the agent endpoint URL.
+ """
+ if self._agent_endpoint is None:
+ raise ValueError(
+ "Agent endpoint is not set. Please provide an agent endpoint when initializing the client."
+ )
+ if self._agent_endpoint.startswith("https://"):
+ return self._agent_endpoint
+ return "https://" + self._agent_endpoint
+
+ @cached_property
+ def agents(self) -> AgentsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.agents import AgentsResource
+
+ return AgentsResource(self)
+
+ @cached_property
+ def chat(self) -> ChatResource:
+ from .resources.chat import ChatResource
+
+ return ChatResource(self)
+
+ @cached_property
+ def images(self) -> ImagesResource:
+ """Generate images from text prompts using various AI models."""
+ from .resources.images import ImagesResource
+
+ return ImagesResource(self)
+
+ @cached_property
+ def responses(self) -> ResponsesResource:
+ """Generate text-to-text responses from text prompts."""
+ from .resources.responses import ResponsesResource
+
+ return ResponsesResource(self)
+
+ @cached_property
+ def gpu_droplets(self) -> GPUDropletsResource:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ from .resources.gpu_droplets import GPUDropletsResource
+
+ return GPUDropletsResource(self)
+
+ @cached_property
+ def inference(self) -> InferenceResource:
+ from .resources.inference import InferenceResource
+
+ return InferenceResource(self)
+
+ @cached_property
+ def knowledge_bases(self) -> KnowledgeBasesResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.knowledge_bases import KnowledgeBasesResource
+
+ return KnowledgeBasesResource(self)
+
+ @cached_property
+ def models(self) -> ModelsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.models import ModelsResource
+
+ return ModelsResource(self)
+
+ @cached_property
+ def regions(self) -> RegionsResource:
+ """Provides information about DigitalOcean data center regions."""
+ from .resources.regions import RegionsResource
+
+ return RegionsResource(self)
+
+ @cached_property
+ def databases(self) -> DatabasesResource:
+ from .resources.databases import DatabasesResource
+
+ return DatabasesResource(self)
+
+ @cached_property
+ def nfs(self) -> NfsResource:
+ from .resources.nfs import NfsResource
+
+ return NfsResource(self)
+
+ @cached_property
+ def retrieve(self) -> RetrieveResource:
+ from .resources.retrieve import RetrieveResource
+
+ return RetrieveResource(self)
+
+ @cached_property
+ def apps(self) -> AppsResource:
+ from .resources.apps import AppsResource
+
+ return AppsResource(self)
+
+ @cached_property
+ def billing(self) -> BillingResource:
+ """
+ The billing endpoints allow you to retrieve your account balance, invoices,
+ billing history, and insights.
+
+ **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+ retrieve the balance information for the requested customer account.
+
+ **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+ are generated on the first of each month for every DigitalOcean
+ customer. An invoice preview is generated daily, which can be accessed
+ with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+ invoices, you will generally send requests to the invoices endpoint at
+ `/v2/customers/my/invoices`.
+
+ **Billing History:** Billing history is a record of billing events for your account.
+ For example, entries may include events like payments made, invoices
+ issued, or credits granted. To interact with invoices, you
+ will generally send requests to the invoices endpoint at
+ `/v2/customers/my/billing_history`.
+
+ **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+ including total amount, region, SKU, and description for a specified date range.
+ It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+ a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+ `v2/billing/{account_urn}/insights/{start_date}/{end_date}` where account_urn is the URN of the customer
+ account, can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+ start_date and end_date must be in YYYY-MM-DD format.
+ """
+ from .resources.billing import BillingResource
+
+ return BillingResource(self)
+
+ @cached_property
+ def with_raw_response(self) -> GradientWithRawResponse:
+ return GradientWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> GradientWithStreamedResponse:
+ return GradientWithStreamedResponse(self)
+
+ @property
+ @override
+ def qs(self) -> Querystring:
+ return Querystring(array_format="comma")
+
+ @property
+ @override
+ def auth_headers(self) -> dict[str, str]:
+ return {**self._model_access_key, **self._agent_access_key, **self._bearer_auth}
+
+ @property
+ def _bearer_auth(self) -> dict[str, str]:
+ access_token = self.access_token
+ if access_token is None:
+ return {}
+ return {"Authorization": f"Bearer {access_token}"}
+
+ @property
+ def _model_access_key(self) -> dict[str, str]:
+ model_access_key = self.model_access_key
+ if model_access_key is None:
+ return {}
+ return {"Authorization": f"Bearer {model_access_key}"}
+
+ @property
+ def _agent_access_key(self) -> dict[str, str]:
+ agent_access_key = self.agent_access_key
+ if agent_access_key is None:
+ return {}
+ return {"Authorization": f"Bearer {agent_access_key}"}
+
+ @property
+ @override
+ def default_headers(self) -> dict[str, str | Omit]:
+ return {
+ **super().default_headers,
+ "X-Stainless-Async": "false",
+ **self._custom_headers,
+ }
+
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.model_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.agent_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.model_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.agent_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ raise TypeError(
+ "Could not resolve authentication method. Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted"
+ )
+
+ def copy(
+ self,
+ *,
+ access_token: str | None = None,
+ model_access_key: str | None = None,
+ agent_access_key: str | None = None,
+ agent_endpoint: str | None = None,
+ inference_endpoint: str | None = None,
+ kbass_endpoint: str | None = None,
+ base_url: str | httpx.URL | None = None,
+ timeout: float | Timeout | None | NotGiven = not_given,
+ http_client: httpx.Client | None = None,
+ max_retries: int | NotGiven = not_given,
+ default_headers: Mapping[str, str] | None = None,
+ set_default_headers: Mapping[str, str] | None = None,
+ default_query: Mapping[str, object] | None = None,
+ set_default_query: Mapping[str, object] | None = None,
+ user_agent_package: str | None = None,
+ user_agent_version: str | None = None,
+ _extra_kwargs: Mapping[str, Any] = {},
+ ) -> Self:
+ """
+ Create a new client instance re-using the same options given to the current client with optional overriding.
+ """
+ if default_headers is not None and set_default_headers is not None:
+ raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+
+ if default_query is not None and set_default_query is not None:
+ raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+
+ headers = self._custom_headers
+ if default_headers is not None:
+ headers = {**headers, **default_headers}
+ elif set_default_headers is not None:
+ headers = set_default_headers
+
+ params = self._custom_query
+ if default_query is not None:
+ params = {**params, **default_query}
+ elif set_default_query is not None:
+ params = set_default_query
+
+ http_client = http_client or self._client
+ client = self.__class__(
+ access_token=access_token or self.access_token,
+ model_access_key=model_access_key or self.model_access_key,
+ agent_access_key=agent_access_key or self.agent_access_key,
+ agent_endpoint=agent_endpoint or self._agent_endpoint,
+ inference_endpoint=inference_endpoint or self.inference_endpoint,
+ kbass_endpoint=kbass_endpoint or self.kbass_endpoint,
+ base_url=base_url or self.base_url,
+ timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
+ http_client=http_client,
+ max_retries=max_retries if is_given(max_retries) else self.max_retries,
+ default_headers=headers,
+ default_query=params,
+ user_agent_package=user_agent_package or self._user_agent_package,
+ user_agent_version=user_agent_version or self._user_agent_version,
+ **_extra_kwargs,
+ )
+ client._base_url_overridden = self._base_url_overridden or base_url is not None
+ return client
+
+ # Alias for `copy` for nicer inline usage, e.g.
+ # client.with_options(timeout=10).foo.create(...)
+ with_options = copy
+
+ @override
+ def _make_status_error(
+ self,
+ err_msg: str,
+ *,
+ body: object,
+ response: httpx.Response,
+ ) -> APIStatusError:
+ if response.status_code == 400:
+ return _exceptions.BadRequestError(err_msg, response=response, body=body)
+
+ if response.status_code == 401:
+ return _exceptions.AuthenticationError(err_msg, response=response, body=body)
+
+ if response.status_code == 403:
+ return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
+
+ if response.status_code == 404:
+ return _exceptions.NotFoundError(err_msg, response=response, body=body)
+
+ if response.status_code == 409:
+ return _exceptions.ConflictError(err_msg, response=response, body=body)
+
+ if response.status_code == 422:
+ return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
+
+ if response.status_code == 429:
+ return _exceptions.RateLimitError(err_msg, response=response, body=body)
+
+ if response.status_code >= 500:
+ return _exceptions.InternalServerError(err_msg, response=response, body=body)
+ return APIStatusError(err_msg, response=response, body=body)
+
+
+class AsyncGradient(AsyncAPIClient):
+    """Asynchronous Gradient API client.
+
+    Credentials and endpoints fall back to environment variables when not
+    passed explicitly (see `__init__` for the exact variable names).
+    """
+
+    # client options
+    access_token: str | None  # falls back to DIGITALOCEAN_ACCESS_TOKEN
+    model_access_key: str | None  # falls back to GRADIENT_MODEL_ACCESS_KEY
+    agent_access_key: str | None  # falls back to GRADIENT_AGENT_ACCESS_KEY
+    _agent_endpoint: str | None  # raw value; normalized by the `agent_endpoint` property
+    inference_endpoint: str | None  # falls back to GRADIENT_INFERENCE_ENDPOINT
+    kbass_endpoint: str | None  # falls back to GRADIENT_KBASS_ENDPOINT
+
+ def __init__(
+ self,
+ *,
+ access_token: str | None = None,
+ model_access_key: str | None = None,
+ agent_access_key: str | None = None,
+ agent_endpoint: str | None = None,
+ inference_endpoint: str | None = None,
+ kbass_endpoint: str | None = None,
+ base_url: str | httpx.URL | None = None,
+ timeout: float | Timeout | None | NotGiven = not_given,
+ max_retries: int = DEFAULT_MAX_RETRIES,
+ default_headers: Mapping[str, str] | None = None,
+ default_query: Mapping[str, object] | None = None,
+ # Configure a custom httpx client.
+ # We provide a `DefaultAsyncHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
+ # See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details.
+ http_client: httpx.AsyncClient | None = None,
+ # Enable or disable schema validation for data returned by the API.
+ # When enabled an error APIResponseValidationError is raised
+ # if the API responds with invalid data for the expected schema.
+ #
+ # This parameter may be removed or changed in the future.
+ # If you rely on this feature, please open a GitHub issue
+ # outlining your use-case to help us decide if it should be
+ # part of our public interface in the future.
+ _strict_response_validation: bool = False,
+ # User agent tracking parameters
+ user_agent_package: str | None = None,
+ user_agent_version: str | None = None,
+ ) -> None:
+ """Construct a new async AsyncGradient client instance.
+
+ This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+ - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN`
+ - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY`
+ - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY`
+ - `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT`
+ - `inference_endpoint` from `GRADIENT_INFERENCE_ENDPOINT`
+ - `kbass_endpoint` from `GRADIENT_KBASS_ENDPOINT`
+ """
+ if access_token is None:
+ access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN")
+ self.access_token = access_token
+
+ if model_access_key is None:
+ model_access_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY")
+ self.model_access_key = model_access_key
+
+ if agent_access_key is None:
+ agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY")
+ self.agent_access_key = agent_access_key
+
+ if agent_endpoint is None:
+ agent_endpoint = os.environ.get("GRADIENT_AGENT_ENDPOINT")
+ self._agent_endpoint = agent_endpoint
+
+ if inference_endpoint is None:
+ inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "https://inference.do-ai.run"
+ self.inference_endpoint = inference_endpoint
+
+ if kbass_endpoint is None:
+ kbass_endpoint = os.environ.get("GRADIENT_KBASS_ENDPOINT") or "kbaas.do-ai.run"
+ self.kbass_endpoint = kbass_endpoint
+
+ if base_url is None:
+ base_url = os.environ.get("GRADIENT_BASE_URL")
+ self._base_url_overridden = base_url is not None
+ if base_url is None:
+ base_url = f"https://api.digitalocean.com"
+
+ super().__init__(
+ version=__version__,
+ base_url=base_url,
+ max_retries=max_retries,
+ timeout=timeout,
+ http_client=http_client,
+ custom_headers=default_headers,
+ custom_query=default_query,
+ _strict_response_validation=_strict_response_validation,
+ user_agent_package=user_agent_package,
+ user_agent_version=user_agent_version,
+ )
+
+ self._default_stream_cls = AsyncStream
+
+ @cached_property
+ def agent_endpoint(self) -> str:
+ """
+ Returns the agent endpoint URL.
+ """
+ if self._agent_endpoint is None:
+ raise ValueError(
+ "Agent endpoint is not set. Please provide an agent endpoint when initializing the client."
+ )
+ if self._agent_endpoint.startswith("https://"):
+ return self._agent_endpoint
+ return "https://" + self._agent_endpoint
+
+    @cached_property
+    def agents(self) -> AsyncAgentsResource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        # NOTE(review): resources are imported lazily inside each accessor —
+        # presumably to defer module loading / avoid import cycles; confirm.
+        from .resources.agents import AsyncAgentsResource
+
+        return AsyncAgentsResource(self)
+
+    @cached_property
+    def chat(self) -> AsyncChatResource:
+        """Access the chat resource."""
+        from .resources.chat import AsyncChatResource
+
+        return AsyncChatResource(self)
+
+    @cached_property
+    def images(self) -> AsyncImagesResource:
+        """Generate images from text prompts using various AI models."""
+        from .resources.images import AsyncImagesResource
+
+        return AsyncImagesResource(self)
+
+    @cached_property
+    def responses(self) -> AsyncResponsesResource:
+        """Generate text-to-text responses from text prompts."""
+        from .resources.responses import AsyncResponsesResource
+
+        return AsyncResponsesResource(self)
+
+    @cached_property
+    def gpu_droplets(self) -> AsyncGPUDropletsResource:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        from .resources.gpu_droplets import AsyncGPUDropletsResource
+
+        return AsyncGPUDropletsResource(self)
+
+    @cached_property
+    def inference(self) -> AsyncInferenceResource:
+        """Access the inference resource."""
+        from .resources.inference import AsyncInferenceResource
+
+        return AsyncInferenceResource(self)
+
+    @cached_property
+    def knowledge_bases(self) -> AsyncKnowledgeBasesResource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.knowledge_bases import AsyncKnowledgeBasesResource
+
+        return AsyncKnowledgeBasesResource(self)
+
+    @cached_property
+    def models(self) -> AsyncModelsResource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.models import AsyncModelsResource
+
+        return AsyncModelsResource(self)
+
+    @cached_property
+    def regions(self) -> AsyncRegionsResource:
+        """Provides information about DigitalOcean data center regions."""
+        from .resources.regions import AsyncRegionsResource
+
+        return AsyncRegionsResource(self)
+
+    @cached_property
+    def databases(self) -> AsyncDatabasesResource:
+        """Access the databases resource."""
+        from .resources.databases import AsyncDatabasesResource
+
+        return AsyncDatabasesResource(self)
+
+    @cached_property
+    def nfs(self) -> AsyncNfsResource:
+        """Access the nfs resource."""
+        from .resources.nfs import AsyncNfsResource
+
+        return AsyncNfsResource(self)
+
+    @cached_property
+    def retrieve(self) -> AsyncRetrieveResource:
+        """Access the retrieve resource."""
+        from .resources.retrieve import AsyncRetrieveResource
+
+        return AsyncRetrieveResource(self)
+
+    @cached_property
+    def apps(self) -> AsyncAppsResource:
+        """Access the apps resource."""
+        from .resources.apps import AsyncAppsResource
+
+        return AsyncAppsResource(self)
+
+    @cached_property
+    def billing(self) -> AsyncBillingResource:
+        """
+        The billing endpoints allow you to retrieve your account balance, invoices,
+        billing history, and insights.
+
+        **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+        retrieve the balance information for the requested customer account.
+
+        **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+        are generated on the first of each month for every DigitalOcean
+        customer. An invoice preview is generated daily, which can be accessed
+        with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+        invoices, you will generally send requests to the invoices endpoint at
+        `/v2/customers/my/invoices`.
+
+        **Billing History:** Billing history is a record of billing events for your account.
+        For example, entries may include events like payments made, invoices
+        issued, or credits granted. To interact with invoices, you
+        will generally send requests to the invoices endpoint at
+        `/v2/customers/my/billing_history`.
+
+        **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+        including total amount, region, SKU, and description for a specified date range.
+        It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+        a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+        `v2/billing/{account_urn}/insights/{start_date}/{end_date}` where account_urn is the URN of the customer
+        account, can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+        start_date and end_date must be in YYYY-MM-DD format.
+        """
+        from .resources.billing import AsyncBillingResource
+
+        return AsyncBillingResource(self)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncGradientWithRawResponse:
+        # View of this client whose resource methods expose raw responses.
+        return AsyncGradientWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncGradientWithStreamedResponse:
+        # View of this client whose resource methods expose streamed responses.
+        return AsyncGradientWithStreamedResponse(self)
+
+    @property
+    @override
+    def qs(self) -> Querystring:
+        # Serialize array query parameters as comma-separated values.
+        return Querystring(array_format="comma")
+
+    @property
+    @override
+    def auth_headers(self) -> dict[str, str]:
+        # All three helpers below use the same "Authorization" key, so merge
+        # order is significant: when several credentials are set, the bearer
+        # `access_token` (merged last) wins over `agent_access_key`, which wins
+        # over `model_access_key`.
+        return {**self._model_access_key, **self._agent_access_key, **self._bearer_auth}
+
+    @property
+    def _bearer_auth(self) -> dict[str, str]:
+        # Empty dict when unset, so the credential drops out of the merge in `auth_headers`.
+        access_token = self.access_token
+        if access_token is None:
+            return {}
+        return {"Authorization": f"Bearer {access_token}"}
+
+    @property
+    def _model_access_key(self) -> dict[str, str]:
+        # Empty dict when unset, so the credential drops out of the merge in `auth_headers`.
+        model_access_key = self.model_access_key
+        if model_access_key is None:
+            return {}
+        return {"Authorization": f"Bearer {model_access_key}"}
+
+    @property
+    def _agent_access_key(self) -> dict[str, str]:
+        # Empty dict when unset, so the credential drops out of the merge in `auth_headers`.
+        agent_access_key = self.agent_access_key
+        if agent_access_key is None:
+            return {}
+        return {"Authorization": f"Bearer {agent_access_key}"}
+
+    @property
+    @override
+    def default_headers(self) -> dict[str, str | Omit]:
+        # Custom headers are merged last so callers can override the defaults.
+        return {
+            **super().default_headers,
+            "X-Stainless-Async": f"async:{get_async_library()}",
+            **self._custom_headers,
+        }
+
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.model_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.agent_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.model_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ if self.agent_access_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ raise TypeError(
+ '"Could not resolve authentication method. Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted"'
+ )
+
+    def copy(
+        self,
+        *,
+        agent_endpoint: str | None = None,
+        access_token: str | None = None,
+        model_access_key: str | None = None,
+        agent_access_key: str | None = None,
+        inference_endpoint: str | None = None,
+        kbass_endpoint: str | None = None,
+        base_url: str | httpx.URL | None = None,
+        timeout: float | Timeout | None | NotGiven = not_given,
+        http_client: httpx.AsyncClient | None = None,
+        max_retries: int | NotGiven = not_given,
+        default_headers: Mapping[str, str] | None = None,
+        set_default_headers: Mapping[str, str] | None = None,
+        default_query: Mapping[str, object] | None = None,
+        set_default_query: Mapping[str, object] | None = None,
+        user_agent_package: str | None = None,
+        user_agent_version: str | None = None,
+        # NOTE: this shared `{}` default is safe — it is only unpacked, never mutated.
+        _extra_kwargs: Mapping[str, Any] = {},
+    ) -> Self:
+        """
+        Create a new client instance re-using the same options given to the current client with optional overriding.
+
+        `default_headers`/`default_query` are merged on top of the current
+        values, while `set_default_headers`/`set_default_query` replace them
+        wholesale; each pair is mutually exclusive.
+
+        Raises:
+            ValueError: If both members of a mutually exclusive pair are given.
+        """
+        if default_headers is not None and set_default_headers is not None:
+            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+
+        if default_query is not None and set_default_query is not None:
+            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+
+        headers = self._custom_headers
+        if default_headers is not None:
+            headers = {**headers, **default_headers}
+        elif set_default_headers is not None:
+            headers = set_default_headers
+
+        params = self._custom_query
+        if default_query is not None:
+            params = {**params, **default_query}
+        elif set_default_query is not None:
+            params = set_default_query
+
+        # Re-use the current underlying httpx client unless a new one was supplied.
+        http_client = http_client or self._client
+        client = self.__class__(
+            access_token=access_token or self.access_token,
+            model_access_key=model_access_key or self.model_access_key,
+            agent_access_key=agent_access_key or self.agent_access_key,
+            agent_endpoint=agent_endpoint or self._agent_endpoint,
+            inference_endpoint=inference_endpoint or self.inference_endpoint,
+            kbass_endpoint=kbass_endpoint or self.kbass_endpoint,
+            base_url=base_url or self.base_url,
+            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
+            http_client=http_client,
+            max_retries=max_retries if is_given(max_retries) else self.max_retries,
+            default_headers=headers,
+            default_query=params,
+            user_agent_package=user_agent_package or self._user_agent_package,
+            user_agent_version=user_agent_version or self._user_agent_version,
+            **_extra_kwargs,
+        )
+        # Preserve the base-URL override flag so environment defaults are not re-applied.
+        client._base_url_overridden = self._base_url_overridden or base_url is not None
+        return client
+
+    # Alias for `copy` for nicer inline usage, e.g.
+    # client.with_options(timeout=10).foo.create(...)
+    with_options = copy
+
+ @override
+ def _make_status_error(
+ self,
+ err_msg: str,
+ *,
+ body: object,
+ response: httpx.Response,
+ ) -> APIStatusError:
+ if response.status_code == 400:
+ return _exceptions.BadRequestError(err_msg, response=response, body=body)
+
+ if response.status_code == 401:
+ return _exceptions.AuthenticationError(err_msg, response=response, body=body)
+
+ if response.status_code == 403:
+ return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
+
+ if response.status_code == 404:
+ return _exceptions.NotFoundError(err_msg, response=response, body=body)
+
+ if response.status_code == 409:
+ return _exceptions.ConflictError(err_msg, response=response, body=body)
+
+ if response.status_code == 422:
+ return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
+
+ if response.status_code == 429:
+ return _exceptions.RateLimitError(err_msg, response=response, body=body)
+
+ if response.status_code >= 500:
+ return _exceptions.InternalServerError(err_msg, response=response, body=body)
+ return APIStatusError(err_msg, response=response, body=body)
+
+
+class GradientWithRawResponse:
+    """Raw-response view over a `Gradient` client: each accessor wraps the
+    corresponding client resource in its `*ResourceWithRawResponse` counterpart."""
+
+    _client: Gradient
+
+    def __init__(self, client: Gradient) -> None:
+        self._client = client
+
+    @cached_property
+    def agents(self) -> agents.AgentsResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.agents import AgentsResourceWithRawResponse
+
+        return AgentsResourceWithRawResponse(self._client.agents)
+
+    @cached_property
+    def chat(self) -> chat.ChatResourceWithRawResponse:
+        from .resources.chat import ChatResourceWithRawResponse
+
+        return ChatResourceWithRawResponse(self._client.chat)
+
+    @cached_property
+    def images(self) -> images.ImagesResourceWithRawResponse:
+        """Generate images from text prompts using various AI models."""
+        from .resources.images import ImagesResourceWithRawResponse
+
+        return ImagesResourceWithRawResponse(self._client.images)
+
+    @cached_property
+    def responses(self) -> responses.ResponsesResourceWithRawResponse:
+        """Generate text-to-text responses from text prompts."""
+        from .resources.responses import ResponsesResourceWithRawResponse
+
+        return ResponsesResourceWithRawResponse(self._client.responses)
+
+    @cached_property
+    def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse
+
+        return GPUDropletsResourceWithRawResponse(self._client.gpu_droplets)
+
+    @cached_property
+    def inference(self) -> inference.InferenceResourceWithRawResponse:
+        from .resources.inference import InferenceResourceWithRawResponse
+
+        return InferenceResourceWithRawResponse(self._client.inference)
+
+    @cached_property
+    def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse
+
+        return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)
+
+    @cached_property
+    def models(self) -> models.ModelsResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.models import ModelsResourceWithRawResponse
+
+        return ModelsResourceWithRawResponse(self._client.models)
+
+    @cached_property
+    def regions(self) -> regions.RegionsResourceWithRawResponse:
+        """Provides information about DigitalOcean data center regions."""
+        from .resources.regions import RegionsResourceWithRawResponse
+
+        return RegionsResourceWithRawResponse(self._client.regions)
+
+    @cached_property
+    def databases(self) -> databases.DatabasesResourceWithRawResponse:
+        from .resources.databases import DatabasesResourceWithRawResponse
+
+        return DatabasesResourceWithRawResponse(self._client.databases)
+
+    @cached_property
+    def nfs(self) -> nfs.NfsResourceWithRawResponse:
+        from .resources.nfs import NfsResourceWithRawResponse
+
+        return NfsResourceWithRawResponse(self._client.nfs)
+
+    @cached_property
+    def retrieve(self) -> retrieve.RetrieveResourceWithRawResponse:
+        from .resources.retrieve import RetrieveResourceWithRawResponse
+
+        return RetrieveResourceWithRawResponse(self._client.retrieve)
+
+    @cached_property
+    def apps(self) -> apps.AppsResourceWithRawResponse:
+        from .resources.apps import AppsResourceWithRawResponse
+
+        return AppsResourceWithRawResponse(self._client.apps)
+
+    @cached_property
+    def billing(self) -> billing.BillingResourceWithRawResponse:
+        """
+        The billing endpoints allow you to retrieve your account balance, invoices,
+        billing history, and insights.
+
+        **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+        retrieve the balance information for the requested customer account.
+
+        **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+        are generated on the first of each month for every DigitalOcean
+        customer. An invoice preview is generated daily, which can be accessed
+        with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+        invoices, you will generally send requests to the invoices endpoint at
+        `/v2/customers/my/invoices`.
+
+        **Billing History:** Billing history is a record of billing events for your account.
+        For example, entries may include events like payments made, invoices
+        issued, or credits granted. To interact with invoices, you
+        will generally send requests to the invoices endpoint at
+        `/v2/customers/my/billing_history`.
+
+        **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+        including total amount, region, SKU, and description for a specified date range.
+        It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+        a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+        `v2/billing/{account_urn}/insights/{start_date}/{end_date}` where account_urn is the URN of the customer
+        account, can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+        start_date and end_date must be in YYYY-MM-DD format.
+        """
+        from .resources.billing import BillingResourceWithRawResponse
+
+        return BillingResourceWithRawResponse(self._client.billing)
+
+
+class AsyncGradientWithRawResponse:
+    """Raw-response view over an `AsyncGradient` client: each accessor wraps the
+    corresponding client resource in its `Async*ResourceWithRawResponse` counterpart."""
+
+    _client: AsyncGradient
+
+    def __init__(self, client: AsyncGradient) -> None:
+        self._client = client
+
+    @cached_property
+    def agents(self) -> agents.AsyncAgentsResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.agents import AsyncAgentsResourceWithRawResponse
+
+        return AsyncAgentsResourceWithRawResponse(self._client.agents)
+
+    @cached_property
+    def chat(self) -> chat.AsyncChatResourceWithRawResponse:
+        from .resources.chat import AsyncChatResourceWithRawResponse
+
+        return AsyncChatResourceWithRawResponse(self._client.chat)
+
+    @cached_property
+    def images(self) -> images.AsyncImagesResourceWithRawResponse:
+        """Generate images from text prompts using various AI models."""
+        from .resources.images import AsyncImagesResourceWithRawResponse
+
+        return AsyncImagesResourceWithRawResponse(self._client.images)
+
+    @cached_property
+    def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
+        """Generate text-to-text responses from text prompts."""
+        from .resources.responses import AsyncResponsesResourceWithRawResponse
+
+        return AsyncResponsesResourceWithRawResponse(self._client.responses)
+
+    @cached_property
+    def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse
+
+        return AsyncGPUDropletsResourceWithRawResponse(self._client.gpu_droplets)
+
+    @cached_property
+    def inference(self) -> inference.AsyncInferenceResourceWithRawResponse:
+        from .resources.inference import AsyncInferenceResourceWithRawResponse
+
+        return AsyncInferenceResourceWithRawResponse(self._client.inference)
+
+    @cached_property
+    def knowledge_bases(
+        self,
+    ) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.knowledge_bases import (
+            AsyncKnowledgeBasesResourceWithRawResponse,
+        )
+
+        return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)
+
+    @cached_property
+    def models(self) -> models.AsyncModelsResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        from .resources.models import AsyncModelsResourceWithRawResponse
+
+        return AsyncModelsResourceWithRawResponse(self._client.models)
+
+    @cached_property
+    def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
+        """Provides information about DigitalOcean data center regions."""
+        from .resources.regions import AsyncRegionsResourceWithRawResponse
+
+        return AsyncRegionsResourceWithRawResponse(self._client.regions)
+
+    @cached_property
+    def databases(self) -> databases.AsyncDatabasesResourceWithRawResponse:
+        from .resources.databases import AsyncDatabasesResourceWithRawResponse
+
+        return AsyncDatabasesResourceWithRawResponse(self._client.databases)
+
+    @cached_property
+    def nfs(self) -> nfs.AsyncNfsResourceWithRawResponse:
+        from .resources.nfs import AsyncNfsResourceWithRawResponse
+
+        return AsyncNfsResourceWithRawResponse(self._client.nfs)
+
+    @cached_property
+    def retrieve(self) -> retrieve.AsyncRetrieveResourceWithRawResponse:
+        from .resources.retrieve import AsyncRetrieveResourceWithRawResponse
+
+        return AsyncRetrieveResourceWithRawResponse(self._client.retrieve)
+
+    @cached_property
+    def apps(self) -> apps.AsyncAppsResourceWithRawResponse:
+        from .resources.apps import AsyncAppsResourceWithRawResponse
+
+        return AsyncAppsResourceWithRawResponse(self._client.apps)
+
+    @cached_property
+    def billing(self) -> billing.AsyncBillingResourceWithRawResponse:
+        """
+        The billing endpoints allow you to retrieve your account balance, invoices,
+        billing history, and insights.
+
+        **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+        retrieve the balance information for the requested customer account.
+
+        **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+        are generated on the first of each month for every DigitalOcean
+        customer. An invoice preview is generated daily, which can be accessed
+        with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+        invoices, you will generally send requests to the invoices endpoint at
+        `/v2/customers/my/invoices`.
+
+        **Billing History:** Billing history is a record of billing events for your account.
+        For example, entries may include events like payments made, invoices
+        issued, or credits granted. To interact with invoices, you
+        will generally send requests to the invoices endpoint at
+        `/v2/customers/my/billing_history`.
+
+        **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+        including total amount, region, SKU, and description for a specified date range.
+        It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+        a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+        `v2/billing/{account_urn}/insights/{start_date}/{end_date}` where account_urn is the URN of the customer
+        account, can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+        start_date and end_date must be in YYYY-MM-DD format.
+        """
+        from .resources.billing import AsyncBillingResourceWithRawResponse
+
+        return AsyncBillingResourceWithRawResponse(self._client.billing)
+
+
+class GradientWithStreamedResponse:
+ _client: Gradient
+
+ def __init__(self, client: Gradient) -> None:
+ self._client = client
+
+ @cached_property
+ def agents(self) -> agents.AgentsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.agents import AgentsResourceWithStreamingResponse
+
+ return AgentsResourceWithStreamingResponse(self._client.agents)
+
+ @cached_property
+ def chat(self) -> chat.ChatResourceWithStreamingResponse:
+ from .resources.chat import ChatResourceWithStreamingResponse
+
+ return ChatResourceWithStreamingResponse(self._client.chat)
+
+ @cached_property
+ def images(self) -> images.ImagesResourceWithStreamingResponse:
+ """Generate images from text prompts using various AI models."""
+ from .resources.images import ImagesResourceWithStreamingResponse
+
+ return ImagesResourceWithStreamingResponse(self._client.images)
+
+ @cached_property
+ def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
+ """Generate text-to-text responses from text prompts."""
+ from .resources.responses import ResponsesResourceWithStreamingResponse
+
+ return ResponsesResourceWithStreamingResponse(self._client.responses)
+
+ @cached_property
+ def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse
+
+ return GPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets)
+
+ @cached_property
+ def inference(self) -> inference.InferenceResourceWithStreamingResponse:
+ from .resources.inference import InferenceResourceWithStreamingResponse
+
+ return InferenceResourceWithStreamingResponse(self._client.inference)
+
+ @cached_property
+ def knowledge_bases(
+ self,
+ ) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.knowledge_bases import (
+ KnowledgeBasesResourceWithStreamingResponse,
+ )
+
+ return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
+
+ @cached_property
+ def models(self) -> models.ModelsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.models import ModelsResourceWithStreamingResponse
+
+ return ModelsResourceWithStreamingResponse(self._client.models)
+
+ @cached_property
+ def regions(self) -> regions.RegionsResourceWithStreamingResponse:
+ """Provides information about DigitalOcean data center regions."""
+ from .resources.regions import RegionsResourceWithStreamingResponse
+
+ return RegionsResourceWithStreamingResponse(self._client.regions)
+
+ @cached_property
+ def databases(self) -> databases.DatabasesResourceWithStreamingResponse:
+ from .resources.databases import DatabasesResourceWithStreamingResponse
+
+ return DatabasesResourceWithStreamingResponse(self._client.databases)
+
+ @cached_property
+ def nfs(self) -> nfs.NfsResourceWithStreamingResponse:
+ from .resources.nfs import NfsResourceWithStreamingResponse
+
+ return NfsResourceWithStreamingResponse(self._client.nfs)
+
+ @cached_property
+ def retrieve(self) -> retrieve.RetrieveResourceWithStreamingResponse:
+ from .resources.retrieve import RetrieveResourceWithStreamingResponse
+
+ return RetrieveResourceWithStreamingResponse(self._client.retrieve)
+
+ @cached_property
+ def apps(self) -> apps.AppsResourceWithStreamingResponse:
+ from .resources.apps import AppsResourceWithStreamingResponse
+
+ return AppsResourceWithStreamingResponse(self._client.apps)
+
+ @cached_property
+    def billing(self) -> billing.BillingResourceWithStreamingResponse:
+        """
+        The billing endpoints allow you to retrieve your account balance, invoices,
+        billing history, and insights.
+
+        **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+        retrieve the balance information for the requested customer account.
+
+        **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+        are generated on the first of each month for every DigitalOcean
+        customer. An invoice preview is generated daily, which can be accessed
+        with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+        invoices, you will generally send requests to the invoices endpoint at
+        `/v2/customers/my/invoices`.
+
+        **Billing History:** Billing history is a record of billing events for your account.
+        For example, entries may include events like payments made, invoices
+        issued, or credits granted. To interact with billing history, you
+        will generally send requests to the billing history endpoint at
+        `/v2/customers/my/billing_history`.
+
+        **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+        including total amount, region, SKU, and description for a specified date range.
+        It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+        a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+        Send requests to `v2/billing/{account_urn}/insights/{start_date}/{end_date}`, where account_urn is the URN of the customer
+        account, which can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+        start_date and end_date must be in YYYY-MM-DD format.
+        """
+        from .resources.billing import BillingResourceWithStreamingResponse
+
+        return BillingResourceWithStreamingResponse(self._client.billing)
+
+
+class AsyncGradientWithStreamedResponse:
+ _client: AsyncGradient
+
+ def __init__(self, client: AsyncGradient) -> None:
+ self._client = client
+
+ @cached_property
+ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.agents import AsyncAgentsResourceWithStreamingResponse
+
+ return AsyncAgentsResourceWithStreamingResponse(self._client.agents)
+
+ @cached_property
+ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
+ from .resources.chat import AsyncChatResourceWithStreamingResponse
+
+ return AsyncChatResourceWithStreamingResponse(self._client.chat)
+
+ @cached_property
+ def images(self) -> images.AsyncImagesResourceWithStreamingResponse:
+ """Generate images from text prompts using various AI models."""
+ from .resources.images import AsyncImagesResourceWithStreamingResponse
+
+ return AsyncImagesResourceWithStreamingResponse(self._client.images)
+
+ @cached_property
+ def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
+ """Generate text-to-text responses from text prompts."""
+ from .resources.responses import AsyncResponsesResourceWithStreamingResponse
+
+ return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
+
+ @cached_property
+ def gpu_droplets(
+ self,
+ ) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ from .resources.gpu_droplets import (
+ AsyncGPUDropletsResourceWithStreamingResponse,
+ )
+
+ return AsyncGPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets)
+
+ @cached_property
+ def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse:
+ from .resources.inference import AsyncInferenceResourceWithStreamingResponse
+
+ return AsyncInferenceResourceWithStreamingResponse(self._client.inference)
+
+ @cached_property
+ def knowledge_bases(
+ self,
+ ) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.knowledge_bases import (
+ AsyncKnowledgeBasesResourceWithStreamingResponse,
+ )
+
+ return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
+
+ @cached_property
+ def models(self) -> models.AsyncModelsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ from .resources.models import AsyncModelsResourceWithStreamingResponse
+
+ return AsyncModelsResourceWithStreamingResponse(self._client.models)
+
+ @cached_property
+ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
+ """Provides information about DigitalOcean data center regions."""
+ from .resources.regions import AsyncRegionsResourceWithStreamingResponse
+
+ return AsyncRegionsResourceWithStreamingResponse(self._client.regions)
+
+ @cached_property
+ def databases(self) -> databases.AsyncDatabasesResourceWithStreamingResponse:
+ from .resources.databases import AsyncDatabasesResourceWithStreamingResponse
+
+ return AsyncDatabasesResourceWithStreamingResponse(self._client.databases)
+
+ @cached_property
+ def nfs(self) -> nfs.AsyncNfsResourceWithStreamingResponse:
+ from .resources.nfs import AsyncNfsResourceWithStreamingResponse
+
+ return AsyncNfsResourceWithStreamingResponse(self._client.nfs)
+
+ @cached_property
+ def retrieve(self) -> retrieve.AsyncRetrieveResourceWithStreamingResponse:
+ from .resources.retrieve import AsyncRetrieveResourceWithStreamingResponse
+
+ return AsyncRetrieveResourceWithStreamingResponse(self._client.retrieve)
+
+ @cached_property
+ def apps(self) -> apps.AsyncAppsResourceWithStreamingResponse:
+ from .resources.apps import AsyncAppsResourceWithStreamingResponse
+
+ return AsyncAppsResourceWithStreamingResponse(self._client.apps)
+
+ @cached_property
+    def billing(self) -> billing.AsyncBillingResourceWithStreamingResponse:
+        """
+        The billing endpoints allow you to retrieve your account balance, invoices,
+        billing history, and insights.
+
+        **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+        retrieve the balance information for the requested customer account.
+
+        **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+        are generated on the first of each month for every DigitalOcean
+        customer. An invoice preview is generated daily, which can be accessed
+        with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+        invoices, you will generally send requests to the invoices endpoint at
+        `/v2/customers/my/invoices`.
+
+        **Billing History:** Billing history is a record of billing events for your account.
+        For example, entries may include events like payments made, invoices
+        issued, or credits granted. To interact with billing history, you
+        will generally send requests to the billing history endpoint at
+        `/v2/customers/my/billing_history`.
+
+        **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+        including total amount, region, SKU, and description for a specified date range.
+        It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+        a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+        Send requests to `v2/billing/{account_urn}/insights/{start_date}/{end_date}`, where account_urn is the URN of the customer
+        account, which can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+        start_date and end_date must be in YYYY-MM-DD format.
+        """
+        from .resources.billing import AsyncBillingResourceWithStreamingResponse
+
+        return AsyncBillingResourceWithStreamingResponse(self._client.billing)
+
+
+Client = Gradient
+
+AsyncClient = AsyncGradient
diff --git a/src/gradient/_compat.py b/src/gradient/_compat.py
new file mode 100644
index 00000000..786ff42a
--- /dev/null
+++ b/src/gradient/_compat.py
@@ -0,0 +1,219 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
+from datetime import date, datetime
+from typing_extensions import Self, Literal
+
+import pydantic
+from pydantic.fields import FieldInfo
+
+from ._types import IncEx, StrBytesIntFloat
+
+_T = TypeVar("_T")
+_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
+
+# --------------- Pydantic v2, v3 compatibility ---------------
+
+# Pyright incorrectly reports some of our functions as overriding a method when they don't
+# pyright: reportIncompatibleMethodOverride=false
+
+PYDANTIC_V1 = pydantic.VERSION.startswith("1.")
+
+if TYPE_CHECKING:
+
+ def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001
+ ...
+
+ def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: # noqa: ARG001
+ ...
+
+ def get_args(t: type[Any]) -> tuple[Any, ...]: # noqa: ARG001
+ ...
+
+ def is_union(tp: type[Any] | None) -> bool: # noqa: ARG001
+ ...
+
+ def get_origin(t: type[Any]) -> type[Any] | None: # noqa: ARG001
+ ...
+
+ def is_literal_type(type_: type[Any]) -> bool: # noqa: ARG001
+ ...
+
+ def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001
+ ...
+
+else:
+ # v1 re-exports
+ if PYDANTIC_V1:
+ from pydantic.typing import (
+ get_args as get_args,
+ is_union as is_union,
+ get_origin as get_origin,
+ is_typeddict as is_typeddict,
+ is_literal_type as is_literal_type,
+ )
+ from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
+ else:
+ from ._utils import (
+ get_args as get_args,
+ is_union as is_union,
+ get_origin as get_origin,
+ parse_date as parse_date,
+ is_typeddict as is_typeddict,
+ parse_datetime as parse_datetime,
+ is_literal_type as is_literal_type,
+ )
+
+
+# refactored config
+if TYPE_CHECKING:
+ from pydantic import ConfigDict as ConfigDict
+else:
+ if PYDANTIC_V1:
+ # TODO: provide an error message here?
+ ConfigDict = None
+ else:
+ from pydantic import ConfigDict as ConfigDict
+
+
+# renamed methods / properties
+def parse_obj(model: type[_ModelT], value: object) -> _ModelT:
+ if PYDANTIC_V1:
+ return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+ else:
+ return model.model_validate(value)
+
+
+def field_is_required(field: FieldInfo) -> bool:
+ if PYDANTIC_V1:
+ return field.required # type: ignore
+ return field.is_required()
+
+
+def field_get_default(field: FieldInfo) -> Any:
+    # Return the field's default, mapping Pydantic v2's "undefined" sentinel to None.
+    value = field.get_default()
+    if PYDANTIC_V1:
+        return value
+    from pydantic_core import PydanticUndefined
+
+    # identity check: PydanticUndefined is a singleton sentinel; `==` would invoke
+    # the default value's arbitrary __eq__ (which may raise or mis-compare)
+    if value is PydanticUndefined:
+        return None
+    return value
+
+
+def field_outer_type(field: FieldInfo) -> Any:
+ if PYDANTIC_V1:
+ return field.outer_type_ # type: ignore
+ return field.annotation
+
+
+def get_model_config(model: type[pydantic.BaseModel]) -> Any:
+ if PYDANTIC_V1:
+ return model.__config__ # type: ignore
+ return model.model_config
+
+
+def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
+ if PYDANTIC_V1:
+ return model.__fields__ # type: ignore
+ return model.model_fields
+
+
+def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
+ if PYDANTIC_V1:
+ return model.copy(deep=deep) # type: ignore
+ return model.model_copy(deep=deep)
+
+
+def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
+ if PYDANTIC_V1:
+ return model.json(indent=indent) # type: ignore
+ return model.model_dump_json(indent=indent)
+
+
+def model_dump(
+ model: pydantic.BaseModel,
+ *,
+ exclude: IncEx | None = None,
+ exclude_unset: bool = False,
+ exclude_defaults: bool = False,
+ warnings: bool = True,
+ mode: Literal["json", "python"] = "python",
+ by_alias: bool | None = None,
+) -> dict[str, Any]:
+ if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
+ return model.model_dump(
+ mode=mode,
+ exclude=exclude,
+ exclude_unset=exclude_unset,
+ exclude_defaults=exclude_defaults,
+ # warnings are not supported in Pydantic v1
+ warnings=True if PYDANTIC_V1 else warnings,
+ by_alias=by_alias,
+ )
+ return cast(
+ "dict[str, Any]",
+ model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+ exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, by_alias=bool(by_alias)
+ ),
+ )
+
+
+def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
+ if PYDANTIC_V1:
+ return model.parse_obj(data) # pyright: ignore[reportDeprecated]
+ return model.model_validate(data)
+
+
+# generic models
+if TYPE_CHECKING:
+
+ class GenericModel(pydantic.BaseModel): ...
+
+else:
+ if PYDANTIC_V1:
+ import pydantic.generics
+
+ class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
+ else:
+ # there no longer needs to be a distinction in v2 but
+ # we still have to create our own subclass to avoid
+ # inconsistent MRO ordering errors
+ class GenericModel(pydantic.BaseModel): ...
+
+
+# cached properties
+if TYPE_CHECKING:
+ cached_property = property
+
+ # we define a separate type (copied from typeshed)
+ # that represents that `cached_property` is `set`able
+ # at runtime, which differs from `@property`.
+ #
+ # this is a separate type as editors likely special case
+ # `@property` and we don't want to cause issues just to have
+ # more helpful internal types.
+
+ class typed_cached_property(Generic[_T]):
+ func: Callable[[Any], _T]
+ attrname: str | None
+
+ def __init__(self, func: Callable[[Any], _T]) -> None: ...
+
+ @overload
+ def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
+
+ @overload
+ def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
+
+ def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
+ raise NotImplementedError()
+
+ def __set_name__(self, owner: type[Any], name: str) -> None: ...
+
+ # __set__ is not defined at runtime, but @cached_property is designed to be settable
+ def __set__(self, instance: object, value: _T) -> None: ...
+else:
+ from functools import cached_property as cached_property
+
+ typed_cached_property = cached_property
diff --git a/src/digitalocean_genai_sdk/_constants.py b/src/gradient/_constants.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_constants.py
rename to src/gradient/_constants.py
diff --git a/src/gradient/_exceptions.py b/src/gradient/_exceptions.py
new file mode 100644
index 00000000..f0c6671d
--- /dev/null
+++ b/src/gradient/_exceptions.py
@@ -0,0 +1,154 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+__all__ = [
+ "BadRequestError",
+ "AuthenticationError",
+ "PermissionDeniedError",
+ "NotFoundError",
+ "ConflictError",
+ "UnprocessableEntityError",
+ "RateLimitError",
+ "InternalServerError",
+ "IndexingJobError",
+ "IndexingJobTimeoutError",
+ "AgentDeploymentError",
+ "AgentDeploymentTimeoutError",
+]
+
+
+class GradientError(Exception):
+ pass
+
+
+class APIError(GradientError):
+ message: str
+ request: httpx.Request
+
+ body: object | None
+ """The API response body.
+
+ If the API responded with a valid JSON structure then this property will be the
+ decoded result.
+
+ If it isn't a valid JSON structure then this will be the raw response.
+
+ If there was no response associated with this error then it will be `None`.
+ """
+
+ def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None: # noqa: ARG002
+ super().__init__(message)
+ self.request = request
+ self.message = message
+ self.body = body
+
+
+class APIResponseValidationError(APIError):
+ response: httpx.Response
+ status_code: int
+
+ def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None:
+ super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body)
+ self.response = response
+ self.status_code = response.status_code
+
+
+class APIStatusError(APIError):
+ """Raised when an API response has a status code of 4xx or 5xx."""
+
+ response: httpx.Response
+ status_code: int
+
+ def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
+ super().__init__(message, response.request, body=body)
+ self.response = response
+ self.status_code = response.status_code
+
+
+class APIConnectionError(APIError):
+ def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None:
+ super().__init__(message, request, body=None)
+
+
+class APITimeoutError(APIConnectionError):
+ def __init__(self, request: httpx.Request) -> None:
+ super().__init__(message="Request timed out.", request=request)
+
+
+class BadRequestError(APIStatusError):
+ status_code: Literal[400] = 400 # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class AuthenticationError(APIStatusError):
+ status_code: Literal[401] = 401 # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class PermissionDeniedError(APIStatusError):
+ status_code: Literal[403] = 403 # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class NotFoundError(APIStatusError):
+ status_code: Literal[404] = 404 # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class ConflictError(APIStatusError):
+ status_code: Literal[409] = 409 # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class UnprocessableEntityError(APIStatusError):
+ status_code: Literal[422] = 422 # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class RateLimitError(APIStatusError):
+ status_code: Literal[429] = 429 # pyright: ignore[reportIncompatibleVariableOverride]
+
+
+class InternalServerError(APIStatusError):
+ pass
+
+
+class IndexingJobError(GradientError):
+ """Raised when an indexing job fails, encounters an error, or is cancelled."""
+
+ uuid: str
+ phase: str
+
+ def __init__(self, message: str, *, uuid: str, phase: str) -> None:
+ super().__init__(message)
+ self.uuid = uuid
+ self.phase = phase
+
+
+class IndexingJobTimeoutError(GradientError):
+ """Raised when polling for an indexing job times out."""
+
+ uuid: str
+ phase: str
+ timeout: float
+
+ def __init__(self, message: str, *, uuid: str, phase: str, timeout: float) -> None:
+ super().__init__(message)
+ self.uuid = uuid
+ self.phase = phase
+ self.timeout = timeout
+
+
+class AgentDeploymentError(GradientError):
+ """Raised when an agent deployment fails."""
+
+ def __init__(self, message: str, status: str) -> None:
+ super().__init__(message)
+ self.status = status
+
+
+class AgentDeploymentTimeoutError(GradientError):
+ """Raised when waiting for an agent deployment times out."""
+
+ def __init__(self, message: str, agent_id: str) -> None:
+ super().__init__(message)
+ self.agent_id = agent_id
diff --git a/src/digitalocean_genai_sdk/_files.py b/src/gradient/_files.py
similarity index 88%
rename from src/digitalocean_genai_sdk/_files.py
rename to src/gradient/_files.py
index df28b382..cc14c14f 100644
--- a/src/digitalocean_genai_sdk/_files.py
+++ b/src/gradient/_files.py
@@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
if not is_file_content(obj):
prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
raise RuntimeError(
- f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main#file-uploads"
+ f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead."
) from None
@@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes:
return file
if is_tuple_t(file):
- return (file[0], _read_file_content(file[1]), *file[2:])
+ return (file[0], read_file_content(file[1]), *file[2:])
raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
-def _read_file_content(file: FileContent) -> HttpxFileContent:
+def read_file_content(file: FileContent) -> HttpxFileContent:
if isinstance(file, os.PathLike):
return pathlib.Path(file).read_bytes()
return file
@@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
return file
if is_tuple_t(file):
- return (file[0], await _async_read_file_content(file[1]), *file[2:])
+ return (file[0], await async_read_file_content(file[1]), *file[2:])
raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
-async def _async_read_file_content(file: FileContent) -> HttpxFileContent:
+async def async_read_file_content(file: FileContent) -> HttpxFileContent:
if isinstance(file, os.PathLike):
return await anyio.Path(file).read_bytes()
diff --git a/src/digitalocean_genai_sdk/_models.py b/src/gradient/_models.py
similarity index 84%
rename from src/digitalocean_genai_sdk/_models.py
rename to src/gradient/_models.py
index 798956f1..29070e05 100644
--- a/src/digitalocean_genai_sdk/_models.py
+++ b/src/gradient/_models.py
@@ -2,9 +2,24 @@
import os
import inspect
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
+import weakref
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Type,
+ Union,
+ Generic,
+ TypeVar,
+ Callable,
+ Iterable,
+ Optional,
+ AsyncIterable,
+ cast,
+)
from datetime import date, datetime
from typing_extensions import (
+ List,
Unpack,
Literal,
ClassVar,
@@ -49,7 +64,7 @@
strip_annotated_type,
)
from ._compat import (
- PYDANTIC_V2,
+ PYDANTIC_V1,
ConfigDict,
GenericModel as BaseGenericModel,
get_args,
@@ -80,11 +95,7 @@ class _ConfigProtocol(Protocol):
class BaseModel(pydantic.BaseModel):
- if PYDANTIC_V2:
- model_config: ClassVar[ConfigDict] = ConfigDict(
- extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
- )
- else:
+ if PYDANTIC_V1:
@property
@override
@@ -94,6 +105,10 @@ def model_fields_set(self) -> set[str]:
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
extra: Any = pydantic.Extra.allow # type: ignore
+ else:
+ model_config: ClassVar[ConfigDict] = ConfigDict(
+ extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
+ )
def to_dict(
self,
@@ -207,28 +222,32 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
else:
fields_values[name] = field_get_default(field)
+ extra_field_type = _get_extra_fields_type(__cls)
+
_extra = {}
for key, value in values.items():
if key not in model_fields:
- if PYDANTIC_V2:
- _extra[key] = value
- else:
+ parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value
+
+ if PYDANTIC_V1:
_fields_set.add(key)
- fields_values[key] = value
+ fields_values[key] = parsed
+ else:
+ _extra[key] = parsed
object.__setattr__(m, "__dict__", fields_values)
- if PYDANTIC_V2:
- # these properties are copied from Pydantic's `model_construct()` method
- object.__setattr__(m, "__pydantic_private__", None)
- object.__setattr__(m, "__pydantic_extra__", _extra)
- object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
- else:
+ if PYDANTIC_V1:
# init_private_attributes() does not exist in v2
m._init_private_attributes() # type: ignore
# copied from Pydantic v1's `construct()` method
object.__setattr__(m, "__fields_set__", _fields_set)
+ else:
+ # these properties are copied from Pydantic's `model_construct()` method
+ object.__setattr__(m, "__pydantic_private__", None)
+ object.__setattr__(m, "__pydantic_extra__", _extra)
+ object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
return m
@@ -238,7 +257,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
# although not in practice
model_construct = construct
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
# we define aliases for some of the new pydantic v2 methods so
# that we can just document these methods without having to specify
# a specific pydantic version as some users may not know which
@@ -251,13 +270,15 @@ def model_dump(
mode: Literal["json", "python"] | str = "python",
include: IncEx | None = None,
exclude: IncEx | None = None,
- by_alias: bool = False,
+ context: Any | None = None,
+ by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
+ fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
@@ -266,16 +287,24 @@ def model_dump(
Args:
mode: The mode in which `to_python` should run.
- If mode is 'json', the dictionary will only contain JSON serializable types.
- If mode is 'python', the dictionary may contain any Python objects.
- include: A list of fields to include in the output.
- exclude: A list of fields to exclude from the output.
+ If mode is 'json', the output will only contain JSON serializable types.
+ If mode is 'python', the output may contain non-JSON-serializable Python objects.
+ include: A set of fields to include in the output.
+ exclude: A set of fields to exclude from the output.
+ context: Additional context to pass to the serializer.
by_alias: Whether to use the field's alias in the dictionary key if defined.
- exclude_unset: Whether to exclude fields that are unset or None from the output.
- exclude_defaults: Whether to exclude fields that are set to their default value from the output.
- exclude_none: Whether to exclude fields that have a value of `None` from the output.
- round_trip: Whether to enable serialization and deserialization round-trip support.
- warnings: Whether to log warnings when invalid fields are encountered.
+ exclude_unset: Whether to exclude fields that have not been explicitly set.
+ exclude_defaults: Whether to exclude fields that are set to their default value.
+ exclude_none: Whether to exclude fields that have a value of `None`.
+ exclude_computed_fields: Whether to exclude computed fields.
+ While this can be useful for round-tripping, it is usually recommended to use the dedicated
+ `round_trip` parameter instead.
+ round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
+ warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
+ "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
+ fallback: A function to call when an unknown value is encountered. If not provided,
+ a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
+ serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A dictionary representation of the model.
@@ -290,31 +319,38 @@ def model_dump(
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
+ if fallback is not None:
+ raise ValueError("fallback is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
dumped = super().dict( # pyright: ignore[reportDeprecated]
include=include,
exclude=exclude,
- by_alias=by_alias,
+ by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
- return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped
+ return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped
@override
def model_dump_json(
self,
*,
indent: int | None = None,
+ ensure_ascii: bool = False,
include: IncEx | None = None,
exclude: IncEx | None = None,
- by_alias: bool = False,
+ context: Any | None = None,
+ by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
+ fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> str:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json
@@ -343,11 +379,17 @@ def model_dump_json(
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
+ if fallback is not None:
+ raise ValueError("fallback is only supported in Pydantic v2")
+ if ensure_ascii != False:
+ raise ValueError("ensure_ascii is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
return super().json( # type: ignore[reportDeprecated]
indent=indent,
include=include,
exclude=exclude,
- by_alias=by_alias,
+ by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
@@ -358,15 +400,32 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if value is None:
return field_get_default(field)
- if PYDANTIC_V2:
- type_ = field.annotation
- else:
+ if PYDANTIC_V1:
type_ = cast(type, field.outer_type_) # type: ignore
+ else:
+ type_ = field.annotation # type: ignore
if type_ is None:
raise RuntimeError(f"Unexpected field type is None for {key}")
- return construct_type(value=value, type_=type_)
+ return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None))
+
+
+def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None:
+ if PYDANTIC_V1:
+ # TODO
+ return None
+
+ schema = cls.__pydantic_core_schema__
+ if schema["type"] == "model":
+ fields = schema["schema"]
+ if fields["type"] == "model-fields":
+ extras = fields.get("extras_schema")
+ if extras and "cls" in extras:
+ # mypy can't narrow the type
+ return extras["cls"] # type: ignore[no-any-return]
+
+ return None
def is_basemodel(type_: type) -> bool:
@@ -420,7 +479,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
return cast(_T, construct_type(value=value, type_=type_))
-def construct_type(*, value: object, type_: object) -> object:
+def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object:
"""Loose coercion to the expected type with construction of nested values.
If the given value does not match the expected type then it is returned as-is.
@@ -438,8 +497,10 @@ def construct_type(*, value: object, type_: object) -> object:
type_ = type_.__value__ # type: ignore[unreachable]
# unwrap `Annotated[T, ...]` -> `T`
- if is_annotated_type(type_):
- meta: tuple[Any, ...] = get_args(type_)[1:]
+ if metadata is not None and len(metadata) > 0:
+ meta: tuple[Any, ...] = tuple(metadata)
+ elif is_annotated_type(type_):
+ meta = get_args(type_)[1:]
type_ = extract_type_arg(type_, 0)
else:
meta = tuple()
@@ -543,6 +604,9 @@ class CachedDiscriminatorType(Protocol):
__discriminator__: DiscriminatorDetails
+DISCRIMINATOR_CACHE: weakref.WeakKeyDictionary[type, DiscriminatorDetails] = weakref.WeakKeyDictionary()
+
+
class DiscriminatorDetails:
field_name: str
"""The name of the discriminator field in the variant class, e.g.
@@ -585,8 +649,9 @@ def __init__(
def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
- if isinstance(union, CachedDiscriminatorType):
- return union.__discriminator__
+ cached = DISCRIMINATOR_CACHE.get(union)
+ if cached is not None:
+ return cached
discriminator_field_name: str | None = None
@@ -604,30 +669,30 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
for variant in get_args(union):
variant = strip_annotated_type(variant)
if is_basemodel_type(variant):
- if PYDANTIC_V2:
- field = _extract_field_schema_pv2(variant, discriminator_field_name)
- if not field:
+ if PYDANTIC_V1:
+ field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+ if not field_info:
continue
# Note: if one variant defines an alias then they all should
- discriminator_alias = field.get("serialization_alias")
-
- field_schema = field["schema"]
+ discriminator_alias = field_info.alias
- if field_schema["type"] == "literal":
- for entry in cast("LiteralSchema", field_schema)["expected"]:
+ if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
+ for entry in get_args(annotation):
if isinstance(entry, str):
mapping[entry] = variant
else:
- field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- if not field_info:
+ field = _extract_field_schema_pv2(variant, discriminator_field_name)
+ if not field:
continue
# Note: if one variant defines an alias then they all should
- discriminator_alias = field_info.alias
+ discriminator_alias = field.get("serialization_alias")
- if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
- for entry in get_args(annotation):
+ field_schema = field["schema"]
+
+ if field_schema["type"] == "literal":
+ for entry in cast("LiteralSchema", field_schema)["expected"]:
if isinstance(entry, str):
mapping[entry] = variant
@@ -639,7 +704,7 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
discriminator_field=discriminator_field_name,
discriminator_alias=discriminator_alias,
)
- cast(CachedDiscriminatorType, union).__discriminator__ = details
+ DISCRIMINATOR_CACHE.setdefault(union, details)
return details
@@ -690,7 +755,7 @@ class GenericModel(BaseGenericModel, BaseModel):
pass
-if PYDANTIC_V2:
+if not PYDANTIC_V1:
from pydantic import TypeAdapter as _TypeAdapter
_CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))
@@ -735,8 +800,10 @@ class FinalRequestOptionsInput(TypedDict, total=False):
timeout: float | Timeout | None
files: HttpxRequestFiles | None
idempotency_key: str
+ content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None]
json_data: Body
extra_json: AnyMapping
+ follow_redirects: bool
@final
@@ -750,18 +817,20 @@ class FinalRequestOptions(pydantic.BaseModel):
files: Union[HttpxRequestFiles, None] = None
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
+ follow_redirects: Union[bool, None] = None
+ content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None] = None
# It should be noted that we cannot use `json` here as that would override
# a BaseModel method in an incompatible fashion.
json_data: Union[Body, None] = None
extra_json: Union[AnyMapping, None] = None
- if PYDANTIC_V2:
- model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
- else:
+ if PYDANTIC_V1:
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
arbitrary_types_allowed: bool = True
+ else:
+ model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
def get_max_retries(self, max_retries: int) -> int:
if isinstance(self.max_retries, NotGiven):
@@ -794,9 +863,9 @@ def construct( # type: ignore
key: strip_not_given(value)
for key, value in values.items()
}
- if PYDANTIC_V2:
- return super().model_construct(_fields_set, **kwargs)
- return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated]
+ if PYDANTIC_V1:
+ return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated]
+ return super().model_construct(_fields_set, **kwargs)
if not TYPE_CHECKING:
# type checkers incorrectly complain about this assignment
diff --git a/src/digitalocean_genai_sdk/_qs.py b/src/gradient/_qs.py
similarity index 91%
rename from src/digitalocean_genai_sdk/_qs.py
rename to src/gradient/_qs.py
index 274320ca..ada6fd3f 100644
--- a/src/digitalocean_genai_sdk/_qs.py
+++ b/src/gradient/_qs.py
@@ -4,7 +4,7 @@
from urllib.parse import parse_qs, urlencode
from typing_extensions import Literal, get_args
-from ._types import NOT_GIVEN, NotGiven, NotGivenOr
+from ._types import NotGiven, not_given
from ._utils import flatten
_T = TypeVar("_T")
@@ -41,8 +41,8 @@ def stringify(
self,
params: Params,
*,
- array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
- nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+ array_format: ArrayFormat | NotGiven = not_given,
+ nested_format: NestedFormat | NotGiven = not_given,
) -> str:
return urlencode(
self.stringify_items(
@@ -56,8 +56,8 @@ def stringify_items(
self,
params: Params,
*,
- array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
- nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+ array_format: ArrayFormat | NotGiven = not_given,
+ nested_format: NestedFormat | NotGiven = not_given,
) -> list[tuple[str, str]]:
opts = Options(
qs=self,
@@ -143,8 +143,8 @@ def __init__(
self,
qs: Querystring = _qs,
*,
- array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
- nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+ array_format: ArrayFormat | NotGiven = not_given,
+ nested_format: NestedFormat | NotGiven = not_given,
) -> None:
self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format
self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format
diff --git a/src/digitalocean_genai_sdk/_resource.py b/src/gradient/_resource.py
similarity index 76%
rename from src/digitalocean_genai_sdk/_resource.py
rename to src/gradient/_resource.py
index fe43ec28..f2bb6c14 100644
--- a/src/digitalocean_genai_sdk/_resource.py
+++ b/src/gradient/_resource.py
@@ -8,13 +8,13 @@
import anyio
if TYPE_CHECKING:
- from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
+ from ._client import Gradient, AsyncGradient
class SyncAPIResource:
- _client: DigitaloceanGenaiSDK
+ _client: Gradient
- def __init__(self, client: DigitaloceanGenaiSDK) -> None:
+ def __init__(self, client: Gradient) -> None:
self._client = client
self._get = client.get
self._post = client.post
@@ -28,9 +28,9 @@ def _sleep(self, seconds: float) -> None:
class AsyncAPIResource:
- _client: AsyncDigitaloceanGenaiSDK
+ _client: AsyncGradient
- def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None:
+ def __init__(self, client: AsyncGradient) -> None:
self._client = client
self._get = client.get
self._post = client.post
diff --git a/src/digitalocean_genai_sdk/_response.py b/src/gradient/_response.py
similarity index 98%
rename from src/digitalocean_genai_sdk/_response.py
rename to src/gradient/_response.py
index 7f1fff1d..0e9dc172 100644
--- a/src/digitalocean_genai_sdk/_response.py
+++ b/src/gradient/_response.py
@@ -29,7 +29,7 @@
from ._models import BaseModel, is_basemodel
from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
-from ._exceptions import DigitaloceanGenaiSDKError, APIResponseValidationError
+from ._exceptions import GradientError, APIResponseValidationError
if TYPE_CHECKING:
from ._models import FinalRequestOptions
@@ -152,6 +152,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -162,6 +163,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -175,6 +177,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=cast_to,
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -217,9 +220,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
and not issubclass(origin, BaseModel)
and issubclass(origin, pydantic.BaseModel)
):
- raise TypeError(
- "Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`"
- )
+ raise TypeError("Pydantic models must subclass our base model type, e.g. `from gradient import BaseModel`")
if (
cast_to is not object
@@ -285,7 +286,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
the `to` argument, e.g.
```py
- from digitalocean_genai_sdk import BaseModel
+ from gradient import BaseModel
class MyModel(BaseModel):
@@ -387,7 +388,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T:
the `to` argument, e.g.
```py
- from digitalocean_genai_sdk import BaseModel
+ from gradient import BaseModel
class MyModel(BaseModel):
@@ -558,11 +559,11 @@ async def stream_to_file(
class MissingStreamClassError(TypeError):
def __init__(self) -> None:
super().__init__(
- "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `digitalocean_genai_sdk._streaming` for reference",
+ "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradient._streaming` for reference",
)
-class StreamAlreadyConsumed(DigitaloceanGenaiSDKError):
+class StreamAlreadyConsumed(GradientError):
"""
Attempted to read or stream content, but the content has already
been streamed.
diff --git a/src/digitalocean_genai_sdk/_streaming.py b/src/gradient/_streaming.py
similarity index 79%
rename from src/digitalocean_genai_sdk/_streaming.py
rename to src/gradient/_streaming.py
index 96c3f3d3..92ce2af2 100644
--- a/src/digitalocean_genai_sdk/_streaming.py
+++ b/src/gradient/_streaming.py
@@ -4,15 +4,17 @@
import json
import inspect
from types import TracebackType
-from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, Optional, AsyncIterator, cast
from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
import httpx
-from ._utils import extract_type_var_from_base
+from ._utils import is_mapping, extract_type_var_from_base
+from ._exceptions import APIError
if TYPE_CHECKING:
- from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
+ from ._client import Gradient, AsyncGradient
+ from ._models import FinalRequestOptions
_T = TypeVar("_T")
@@ -22,7 +24,7 @@ class Stream(Generic[_T]):
"""Provides the core interface to iterate over a synchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEBytesDecoder
def __init__(
@@ -30,11 +32,13 @@ def __init__(
*,
cast_to: type[_T],
response: httpx.Response,
- client: DigitaloceanGenaiSDK,
+ client: Gradient,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
@@ -54,12 +58,30 @@ def __stream__(self) -> Iterator[_T]:
process_data = self._client._process_response_data
iterator = self._iter_events()
- for sse in iterator:
- yield process_data(data=sse.json(), cast_to=cast_to, response=response)
-
- # Ensure the entire stream is consumed
- for _sse in iterator:
- ...
+ try:
+ for sse in iterator:
+ if sse.data.startswith("[DONE]"):
+ break
+
+ data = sse.json()
+ if is_mapping(data) and data.get("error"):
+ message = None
+ error = data.get("error")
+ if is_mapping(error):
+ message = error.get("message")
+ if not message or not isinstance(message, str):
+ message = "An error occurred during streaming"
+
+ raise APIError(
+ message=message,
+ request=self.response.request,
+ body=data["error"],
+ )
+
+ yield process_data(data=data, cast_to=cast_to, response=response)
+ finally:
+ # Ensure the response is closed even if the consumer doesn't read all data
+ response.close()
def __enter__(self) -> Self:
return self
@@ -85,7 +107,7 @@ class AsyncStream(Generic[_T]):
"""Provides the core interface to iterate over an asynchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEDecoder | SSEBytesDecoder
def __init__(
@@ -93,11 +115,13 @@ def __init__(
*,
cast_to: type[_T],
response: httpx.Response,
- client: AsyncDigitaloceanGenaiSDK,
+ client: AsyncGradient,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
@@ -118,12 +142,30 @@ async def __stream__(self) -> AsyncIterator[_T]:
process_data = self._client._process_response_data
iterator = self._iter_events()
- async for sse in iterator:
- yield process_data(data=sse.json(), cast_to=cast_to, response=response)
-
- # Ensure the entire stream is consumed
- async for _sse in iterator:
- ...
+ try:
+ async for sse in iterator:
+ if sse.data.startswith("[DONE]"):
+ break
+
+ data = sse.json()
+ if is_mapping(data) and data.get("error"):
+ message = None
+ error = data.get("error")
+ if is_mapping(error):
+ message = error.get("message")
+ if not message or not isinstance(message, str):
+ message = "An error occurred during streaming"
+
+ raise APIError(
+ message=message,
+ request=self.response.request,
+ body=data["error"],
+ )
+
+ yield process_data(data=data, cast_to=cast_to, response=response)
+ finally:
+ # Ensure the response is closed even if the consumer doesn't read all data
+ await response.aclose()
async def __aenter__(self) -> Self:
return self
diff --git a/src/gradient/_types.py b/src/gradient/_types.py
new file mode 100644
index 00000000..338a463d
--- /dev/null
+++ b/src/gradient/_types.py
@@ -0,0 +1,270 @@
+from __future__ import annotations
+
+from os import PathLike
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ List,
+ Type,
+ Tuple,
+ Union,
+ Mapping,
+ TypeVar,
+ Callable,
+ Iterable,
+ Iterator,
+ Optional,
+ Sequence,
+ AsyncIterable,
+)
+from typing_extensions import (
+ Set,
+ Literal,
+ Protocol,
+ TypeAlias,
+ TypedDict,
+ SupportsIndex,
+ overload,
+ override,
+ runtime_checkable,
+)
+
+import httpx
+import pydantic
+from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport
+
+if TYPE_CHECKING:
+ from ._models import BaseModel
+ from ._response import APIResponse, AsyncAPIResponse
+
+Transport = BaseTransport
+AsyncTransport = AsyncBaseTransport
+Query = Mapping[str, object]
+Body = object
+AnyMapping = Mapping[str, object]
+ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
+_T = TypeVar("_T")
+
+
+# Approximates httpx internal ProxiesTypes and RequestFiles types
+# while adding support for `PathLike` instances
+ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]]
+ProxiesTypes = Union[str, Proxy, ProxiesDict]
+if TYPE_CHECKING:
+ Base64FileInput = Union[IO[bytes], PathLike[str]]
+ FileContent = Union[IO[bytes], bytes, PathLike[str]]
+else:
+ Base64FileInput = Union[IO[bytes], PathLike]
+ FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8.
+
+
+# Used for sending raw binary data / streaming data in request bodies
+# e.g. for file uploads without multipart encoding
+BinaryTypes = Union[bytes, bytearray, IO[bytes], Iterable[bytes]]
+AsyncBinaryTypes = Union[bytes, bytearray, IO[bytes], AsyncIterable[bytes]]
+
+FileTypes = Union[
+ # file (or bytes)
+ FileContent,
+ # (filename, file (or bytes))
+ Tuple[Optional[str], FileContent],
+ # (filename, file (or bytes), content_type)
+ Tuple[Optional[str], FileContent, Optional[str]],
+ # (filename, file (or bytes), content_type, headers)
+ Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],
+]
+RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
+
+# duplicate of the above but without our custom file support
+HttpxFileContent = Union[IO[bytes], bytes]
+HttpxFileTypes = Union[
+ # file (or bytes)
+ HttpxFileContent,
+ # (filename, file (or bytes))
+ Tuple[Optional[str], HttpxFileContent],
+ # (filename, file (or bytes), content_type)
+ Tuple[Optional[str], HttpxFileContent, Optional[str]],
+ # (filename, file (or bytes), content_type, headers)
+ Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]],
+]
+HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]]
+
+# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT
+# where ResponseT includes `None`. In order to support directly
+# passing `None`, overloads would have to be defined for every
+# method that uses `ResponseT` which would lead to an unacceptable
+# amount of code duplication and make it unreadable. See _base_client.py
+# for example usage.
+#
+# This unfortunately means that you will either have
+# to import this type and pass it explicitly:
+#
+# from gradient import NoneType
+# client.get('/foo', cast_to=NoneType)
+#
+# or build it yourself:
+#
+# client.get('/foo', cast_to=type(None))
+if TYPE_CHECKING:
+ NoneType: Type[None]
+else:
+ NoneType = type(None)
+
+
+class RequestOptions(TypedDict, total=False):
+ headers: Headers
+ max_retries: int
+ timeout: float | Timeout | None
+ params: Query
+ extra_json: AnyMapping
+ idempotency_key: str
+ follow_redirects: bool
+
+
+# Sentinel class used until PEP 0661 is accepted
+class NotGiven:
+ """
+ For parameters with a meaningful None value, we need to distinguish between
+ the user explicitly passing None, and the user not passing the parameter at
+ all.
+
+ User code shouldn't need to use not_given directly.
+
+ For example:
+
+ ```py
+ def create(timeout: Timeout | None | NotGiven = not_given): ...
+
+
+ create(timeout=1) # 1s timeout
+ create(timeout=None) # No timeout
+ create() # Default timeout behavior
+ ```
+ """
+
+ def __bool__(self) -> Literal[False]:
+ return False
+
+ @override
+ def __repr__(self) -> str:
+ return "NOT_GIVEN"
+
+
+not_given = NotGiven()
+# for backwards compatibility:
+NOT_GIVEN = NotGiven()
+
+
+class Omit:
+ """
+ To explicitly omit something from being sent in a request, use `omit`.
+
+ ```py
+ # as the default `Content-Type` header is `application/json` that will be sent
+ client.post("/upload/files", files={"file": b"my raw file content"})
+
+ # you can't explicitly override the header as it has to be dynamically generated
+ # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
+ client.post(..., headers={"Content-Type": "multipart/form-data"})
+
+ # instead you can remove the default `application/json` header by passing omit
+ client.post(..., headers={"Content-Type": omit})
+ ```
+ """
+
+ def __bool__(self) -> Literal[False]:
+ return False
+
+
+omit = Omit()
+
+
+@runtime_checkable
+class ModelBuilderProtocol(Protocol):
+ @classmethod
+ def build(
+ cls: type[_T],
+ *,
+ response: Response,
+ data: object,
+ ) -> _T: ...
+
+
+Headers = Mapping[str, Union[str, Omit]]
+
+
+class HeadersLikeProtocol(Protocol):
+ def get(self, __key: str) -> str | None: ...
+
+
+HeadersLike = Union[Headers, HeadersLikeProtocol]
+
+ResponseT = TypeVar(
+ "ResponseT",
+ bound=Union[
+ object,
+ str,
+ None,
+ "BaseModel",
+ List[Any],
+ Dict[str, Any],
+ Response,
+ ModelBuilderProtocol,
+ "APIResponse[Any]",
+ "AsyncAPIResponse[Any]",
+ ],
+)
+
+StrBytesIntFloat = Union[str, bytes, int, float]
+
+# Note: copied from Pydantic
+# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79
+IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]]
+
+PostParser = Callable[[Any], Any]
+
+
+@runtime_checkable
+class InheritsGeneric(Protocol):
+ """Represents a type that has inherited from `Generic`
+
+ The `__orig_bases__` property can be used to determine the resolved
+ type variable for a given base class.
+ """
+
+ __orig_bases__: tuple[_GenericAlias]
+
+
+class _GenericAlias(Protocol):
+ __origin__: type[object]
+
+
+class HttpxSendArgs(TypedDict, total=False):
+ auth: httpx.Auth
+ follow_redirects: bool
+
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+if TYPE_CHECKING:
+ # This works because str.__contains__ does not accept object (either in typeshed or at runtime)
+ # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
+ #
+ # Note: index() and count() methods are intentionally omitted to allow pyright to properly
+ # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr.
+ class SequenceNotStr(Protocol[_T_co]):
+ @overload
+ def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
+ @overload
+ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
+ def __contains__(self, value: object, /) -> bool: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[_T_co]: ...
+ def __reversed__(self) -> Iterator[_T_co]: ...
+else:
+ # just point this to a normal `Sequence` at runtime to avoid having to special case
+ # deserializing our custom sequence type
+ SequenceNotStr = Sequence
diff --git a/src/gradient/_utils/__init__.py b/src/gradient/_utils/__init__.py
new file mode 100644
index 00000000..dc64e29a
--- /dev/null
+++ b/src/gradient/_utils/__init__.py
@@ -0,0 +1,64 @@
+from ._sync import asyncify as asyncify
+from ._proxy import LazyProxy as LazyProxy
+from ._utils import (
+ flatten as flatten,
+ is_dict as is_dict,
+ is_list as is_list,
+ is_given as is_given,
+ is_tuple as is_tuple,
+ json_safe as json_safe,
+ lru_cache as lru_cache,
+ is_mapping as is_mapping,
+ is_tuple_t as is_tuple_t,
+ is_iterable as is_iterable,
+ is_sequence as is_sequence,
+ coerce_float as coerce_float,
+ is_mapping_t as is_mapping_t,
+ removeprefix as removeprefix,
+ removesuffix as removesuffix,
+ extract_files as extract_files,
+ is_sequence_t as is_sequence_t,
+ required_args as required_args,
+ coerce_boolean as coerce_boolean,
+ coerce_integer as coerce_integer,
+ file_from_path as file_from_path,
+ strip_not_given as strip_not_given,
+ deepcopy_minimal as deepcopy_minimal,
+ get_async_library as get_async_library,
+ maybe_coerce_float as maybe_coerce_float,
+ get_required_header as get_required_header,
+ maybe_coerce_boolean as maybe_coerce_boolean,
+ maybe_coerce_integer as maybe_coerce_integer,
+)
+from ._compat import (
+ get_args as get_args,
+ is_union as is_union,
+ get_origin as get_origin,
+ is_typeddict as is_typeddict,
+ is_literal_type as is_literal_type,
+)
+from ._typing import (
+ is_list_type as is_list_type,
+ is_union_type as is_union_type,
+ extract_type_arg as extract_type_arg,
+ is_iterable_type as is_iterable_type,
+ is_required_type as is_required_type,
+ is_sequence_type as is_sequence_type,
+ is_annotated_type as is_annotated_type,
+ is_type_alias_type as is_type_alias_type,
+ strip_annotated_type as strip_annotated_type,
+ extract_type_var_from_base as extract_type_var_from_base,
+)
+from ._streams import consume_sync_iterator as consume_sync_iterator, consume_async_iterator as consume_async_iterator
+from ._transform import (
+ PropertyInfo as PropertyInfo,
+ transform as transform,
+ async_transform as async_transform,
+ maybe_transform as maybe_transform,
+ async_maybe_transform as async_maybe_transform,
+)
+from ._reflection import (
+ function_has_argument as function_has_argument,
+ assert_signatures_in_sync as assert_signatures_in_sync,
+)
+from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
diff --git a/src/gradient/_utils/_compat.py b/src/gradient/_utils/_compat.py
new file mode 100644
index 00000000..2c70b299
--- /dev/null
+++ b/src/gradient/_utils/_compat.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import sys
+import typing_extensions
+from typing import Any, Type, Union, Literal, Optional
+from datetime import date, datetime
+from typing_extensions import get_args as _get_args, get_origin as _get_origin
+
+from .._types import StrBytesIntFloat
+from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime
+
+_LITERAL_TYPES = {Literal, typing_extensions.Literal}
+
+
+def get_args(tp: type[Any]) -> tuple[Any, ...]:
+ return _get_args(tp)
+
+
+def get_origin(tp: type[Any]) -> type[Any] | None:
+ return _get_origin(tp)
+
+
+def is_union(tp: Optional[Type[Any]]) -> bool:
+ if sys.version_info < (3, 10):
+ return tp is Union # type: ignore[comparison-overlap]
+ else:
+ import types
+
+ return tp is Union or tp is types.UnionType # type: ignore[comparison-overlap]
+
+
+def is_typeddict(tp: Type[Any]) -> bool:
+ return typing_extensions.is_typeddict(tp)
+
+
+def is_literal_type(tp: Type[Any]) -> bool:
+ return get_origin(tp) in _LITERAL_TYPES
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+ return _parse_date(value)
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+ return _parse_datetime(value)
diff --git a/src/gradient/_utils/_datetime_parse.py b/src/gradient/_utils/_datetime_parse.py
new file mode 100644
index 00000000..7cb9d9e6
--- /dev/null
+++ b/src/gradient/_utils/_datetime_parse.py
@@ -0,0 +1,136 @@
+"""
+This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py
+without the Pydantic v1 specific errors.
+"""
+
+from __future__ import annotations
+
+import re
+from typing import Dict, Union, Optional
+from datetime import date, datetime, timezone, timedelta
+
+from .._types import StrBytesIntFloat
+
+date_expr = r"(?P\d{4})-(?P\d{1,2})-(?P\d{1,2})"
+time_expr = (
+ r"(?P\d{1,2}):(?P\d{1,2})"
+ r"(?::(?P\d{1,2})(?:\.(?P\d{1,6})\d{0,6})?)?"
+ r"(?PZ|[+-]\d{2}(?::?\d{2})?)?$"
+)
+
+date_re = re.compile(f"{date_expr}$")
+datetime_re = re.compile(f"{date_expr}[T ]{time_expr}")
+
+
+EPOCH = datetime(1970, 1, 1)
+# if greater than this, the number is in ms, if less than or equal it's in seconds
+# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
+MS_WATERSHED = int(2e10)
+# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
+MAX_NUMBER = int(3e20)
+
+
+def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
+ if isinstance(value, (int, float)):
+ return value
+ try:
+ return float(value)
+ except ValueError:
+ return None
+ except TypeError:
+ raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None
+
+
+def _from_unix_seconds(seconds: Union[int, float]) -> datetime:
+ if seconds > MAX_NUMBER:
+ return datetime.max
+ elif seconds < -MAX_NUMBER:
+ return datetime.min
+
+ while abs(seconds) > MS_WATERSHED:
+ seconds /= 1000
+ dt = EPOCH + timedelta(seconds=seconds)
+ return dt.replace(tzinfo=timezone.utc)
+
+
+def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]:
+ if value == "Z":
+ return timezone.utc
+ elif value is not None:
+ offset_mins = int(value[-2:]) if len(value) > 3 else 0
+ offset = 60 * int(value[1:3]) + offset_mins
+ if value[0] == "-":
+ offset = -offset
+ return timezone(timedelta(minutes=offset))
+ else:
+ return None
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+ """
+ Parse a datetime/int/float/string and return a datetime.datetime.
+
+ This function supports time zone offsets. When the input contains one,
+ the output uses a timezone with a fixed offset from UTC.
+
+ Raise ValueError if the input is well formatted but not a valid datetime.
+ Raise ValueError if the input isn't well formatted.
+ """
+ if isinstance(value, datetime):
+ return value
+
+ number = _get_numeric(value, "datetime")
+ if number is not None:
+ return _from_unix_seconds(number)
+
+ if isinstance(value, bytes):
+ value = value.decode()
+
+ assert not isinstance(value, (float, int))
+
+ match = datetime_re.match(value)
+ if match is None:
+ raise ValueError("invalid datetime format")
+
+ kw = match.groupdict()
+ if kw["microsecond"]:
+ kw["microsecond"] = kw["microsecond"].ljust(6, "0")
+
+ tzinfo = _parse_timezone(kw.pop("tzinfo"))
+ kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
+ kw_["tzinfo"] = tzinfo
+
+ return datetime(**kw_) # type: ignore
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+ """
+ Parse a date/int/float/string and return a datetime.date.
+
+ Raise ValueError if the input is well formatted but not a valid date.
+ Raise ValueError if the input isn't well formatted.
+ """
+ if isinstance(value, date):
+ if isinstance(value, datetime):
+ return value.date()
+ else:
+ return value
+
+ number = _get_numeric(value, "date")
+ if number is not None:
+ return _from_unix_seconds(number).date()
+
+ if isinstance(value, bytes):
+ value = value.decode()
+
+ assert not isinstance(value, (float, int))
+ match = date_re.match(value)
+ if match is None:
+ raise ValueError("invalid date format")
+
+ kw = {k: int(v) for k, v in match.groupdict().items()}
+
+ try:
+ return date(**kw)
+ except ValueError:
+ raise ValueError("invalid date format") from None
diff --git a/src/gradient/_utils/_json.py b/src/gradient/_utils/_json.py
new file mode 100644
index 00000000..60584214
--- /dev/null
+++ b/src/gradient/_utils/_json.py
@@ -0,0 +1,35 @@
+import json
+from typing import Any
+from datetime import datetime
+from typing_extensions import override
+
+import pydantic
+
+from .._compat import model_dump
+
+
+def openapi_dumps(obj: Any) -> bytes:
+ """
+ Serialize an object to UTF-8 encoded JSON bytes.
+
+ Extends the standard json.dumps with support for additional types
+ commonly used in the SDK, such as `datetime`, `pydantic.BaseModel`, etc.
+ """
+ return json.dumps(
+ obj,
+ cls=_CustomEncoder,
+ # Uses the same defaults as httpx's JSON serialization
+ ensure_ascii=False,
+ separators=(",", ":"),
+ allow_nan=False,
+ ).encode()
+
+
+class _CustomEncoder(json.JSONEncoder):
+ @override
+ def default(self, o: Any) -> Any:
+ if isinstance(o, datetime):
+ return o.isoformat()
+ if isinstance(o, pydantic.BaseModel):
+ return model_dump(o, exclude_unset=True, mode="json", by_alias=True)
+ return super().default(o)
diff --git a/src/gradient/_utils/_logs.py b/src/gradient/_utils/_logs.py
new file mode 100644
index 00000000..a60da7f9
--- /dev/null
+++ b/src/gradient/_utils/_logs.py
@@ -0,0 +1,25 @@
+import os
+import logging
+
+logger: logging.Logger = logging.getLogger("gradient")
+httpx_logger: logging.Logger = logging.getLogger("httpx")
+
+
+def _basic_config() -> None:
+ # e.g. [2023-10-05 14:12:26 - gradient._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
+ logging.basicConfig(
+ format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ )
+
+
+def setup_logging() -> None:
+ env = os.environ.get("GRADIENT_LOG")
+ if env == "debug":
+ _basic_config()
+ logger.setLevel(logging.DEBUG)
+ httpx_logger.setLevel(logging.DEBUG)
+ elif env == "info":
+ _basic_config()
+ logger.setLevel(logging.INFO)
+ httpx_logger.setLevel(logging.INFO)
diff --git a/src/digitalocean_genai_sdk/_utils/_proxy.py b/src/gradient/_utils/_proxy.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_proxy.py
rename to src/gradient/_utils/_proxy.py
diff --git a/src/digitalocean_genai_sdk/_utils/_reflection.py b/src/gradient/_utils/_reflection.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_reflection.py
rename to src/gradient/_utils/_reflection.py
diff --git a/src/gradient/_utils/_resources_proxy.py b/src/gradient/_utils/_resources_proxy.py
new file mode 100644
index 00000000..bf3e570d
--- /dev/null
+++ b/src/gradient/_utils/_resources_proxy.py
@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import override
+
+from ._proxy import LazyProxy
+
+
+class ResourcesProxy(LazyProxy[Any]):
+ """A proxy for the `gradient.resources` module.
+
+ This is used so that we can lazily import `gradient.resources` only when
+ needed *and* so that users can just import `gradient` and reference `gradient.resources`
+ """
+
+ @override
+ def __load__(self) -> Any:
+ import importlib
+
+ mod = importlib.import_module("gradient.resources")
+ return mod
+
+
+resources = ResourcesProxy().__as_proxied__()
diff --git a/src/digitalocean_genai_sdk/_utils/_streams.py b/src/gradient/_utils/_streams.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_streams.py
rename to src/gradient/_utils/_streams.py
diff --git a/src/gradient/_utils/_sync.py b/src/gradient/_utils/_sync.py
new file mode 100644
index 00000000..f6027c18
--- /dev/null
+++ b/src/gradient/_utils/_sync.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import asyncio
+import functools
+from typing import TypeVar, Callable, Awaitable
+from typing_extensions import ParamSpec
+
+import anyio
+import sniffio
+import anyio.to_thread
+
+T_Retval = TypeVar("T_Retval")
+T_ParamSpec = ParamSpec("T_ParamSpec")
+
+
+async def to_thread(
+ func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
+) -> T_Retval:
+ if sniffio.current_async_library() == "asyncio":
+ return await asyncio.to_thread(func, *args, **kwargs)
+
+ return await anyio.to_thread.run_sync(
+ functools.partial(func, *args, **kwargs),
+ )
+
+
+# inspired by `asyncer`, https://github.com/tiangolo/asyncer
+def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
+ """
+ Take a blocking function and create an async one that receives the same
+ positional and keyword arguments.
+
+ Usage:
+
+ ```python
+ def blocking_func(arg1, arg2, kwarg1=None):
+ # blocking code
+ return result
+
+
+    result = asyncify(blocking_func)(arg1, arg2, kwarg1=value1)
+ ```
+
+ ## Arguments
+
+ `function`: a blocking regular callable (e.g. a function)
+
+ ## Return
+
+ An async function that takes the same positional and keyword arguments as the
+ original one, that when called runs the same original function in a thread worker
+ and returns the result.
+ """
+
+ async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
+ return await to_thread(function, *args, **kwargs)
+
+ return wrapper
diff --git a/src/digitalocean_genai_sdk/_utils/_transform.py b/src/gradient/_utils/_transform.py
similarity index 96%
rename from src/digitalocean_genai_sdk/_utils/_transform.py
rename to src/gradient/_utils/_transform.py
index b0cc20a7..52075492 100644
--- a/src/digitalocean_genai_sdk/_utils/_transform.py
+++ b/src/gradient/_utils/_transform.py
@@ -16,18 +16,20 @@
lru_cache,
is_mapping,
is_iterable,
+ is_sequence,
)
from .._files import is_base64_file_input
+from ._compat import get_origin, is_typeddict
from ._typing import (
is_list_type,
is_union_type,
extract_type_arg,
is_iterable_type,
is_required_type,
+ is_sequence_type,
is_annotated_type,
strip_annotated_type,
)
-from .._compat import get_origin, model_dump, is_typeddict
_T = TypeVar("_T")
@@ -167,6 +169,8 @@ def _transform_recursive(
Defaults to the same value as the `annotation` argument.
"""
+ from .._compat import model_dump
+
if inner_type is None:
inner_type = annotation
@@ -184,6 +188,8 @@ def _transform_recursive(
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+ # Sequence[T]
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
@@ -262,7 +268,7 @@ def _transform_typeddict(
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
if not is_given(value):
- # we don't need to include `NotGiven` values here as they'll
+ # we don't need to include omitted values here as they'll
# be stripped out before the request is sent anyway
continue
@@ -329,6 +335,8 @@ async def _async_transform_recursive(
Defaults to the same value as the `annotation` argument.
"""
+ from .._compat import model_dump
+
if inner_type is None:
inner_type = annotation
@@ -346,6 +354,8 @@ async def _async_transform_recursive(
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+ # Sequence[T]
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
@@ -424,7 +434,7 @@ async def _async_transform_typeddict(
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
if not is_given(value):
- # we don't need to include `NotGiven` values here as they'll
+ # we don't need to include omitted values here as they'll
# be stripped out before the request is sent anyway
continue
diff --git a/src/digitalocean_genai_sdk/_utils/_typing.py b/src/gradient/_utils/_typing.py
similarity index 95%
rename from src/digitalocean_genai_sdk/_utils/_typing.py
rename to src/gradient/_utils/_typing.py
index 1bac9542..193109f3 100644
--- a/src/digitalocean_genai_sdk/_utils/_typing.py
+++ b/src/gradient/_utils/_typing.py
@@ -15,7 +15,7 @@
from ._utils import lru_cache
from .._types import InheritsGeneric
-from .._compat import is_union as _is_union
+from ._compat import is_union as _is_union
def is_annotated_type(typ: type) -> bool:
@@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool:
return (get_origin(typ) or typ) == list
+def is_sequence_type(typ: type) -> bool:
+ origin = get_origin(typ) or typ
+ return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence
+
+
def is_iterable_type(typ: type) -> bool:
"""If the given type is `typing.Iterable[T]`"""
origin = get_origin(typ) or typ
diff --git a/src/digitalocean_genai_sdk/_utils/_utils.py b/src/gradient/_utils/_utils.py
similarity index 97%
rename from src/digitalocean_genai_sdk/_utils/_utils.py
rename to src/gradient/_utils/_utils.py
index ea3cf3f2..eec7f4a1 100644
--- a/src/digitalocean_genai_sdk/_utils/_utils.py
+++ b/src/gradient/_utils/_utils.py
@@ -21,8 +21,7 @@
import sniffio
-from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
-from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
+from .._types import Omit, NotGiven, FileTypes, HeadersLike
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -64,7 +63,7 @@ def _extract_items(
try:
key = path[index]
except IndexError:
- if isinstance(obj, NotGiven):
+ if not is_given(obj):
# no value was provided - we can safely ignore
return []
@@ -127,14 +126,14 @@ def _extract_items(
return []
-def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]:
- return not isinstance(obj, NotGiven)
+def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]:
+ return not isinstance(obj, NotGiven) and not isinstance(obj, Omit)
# Type safe methods for narrowing types with TypeVars.
# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
# however this cause Pyright to rightfully report errors. As we know we don't
-# care about the contained types we can safely use `object` in it's place.
+# care about the contained types we can safely use `object` in its place.
#
# There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
# `is_*` is for when you're dealing with an unknown input
diff --git a/src/gradient/_version.py b/src/gradient/_version.py
new file mode 100644
index 00000000..3b4b7def
--- /dev/null
+++ b/src/gradient/_version.py
@@ -0,0 +1,4 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+__title__ = "gradient"
+__version__ = "3.12.0" # x-release-please-version
diff --git a/src/digitalocean_genai_sdk/lib/.keep b/src/gradient/lib/.keep
similarity index 100%
rename from src/digitalocean_genai_sdk/lib/.keep
rename to src/gradient/lib/.keep
diff --git a/src/digitalocean_genai_sdk/py.typed b/src/gradient/py.typed
similarity index 100%
rename from src/digitalocean_genai_sdk/py.typed
rename to src/gradient/py.typed
diff --git a/src/gradient/resources/__init__.py b/src/gradient/resources/__init__.py
new file mode 100644
index 00000000..6f218273
--- /dev/null
+++ b/src/gradient/resources/__init__.py
@@ -0,0 +1,201 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .nfs import (
+ NfsResource,
+ AsyncNfsResource,
+ NfsResourceWithRawResponse,
+ AsyncNfsResourceWithRawResponse,
+ NfsResourceWithStreamingResponse,
+ AsyncNfsResourceWithStreamingResponse,
+)
+from .apps import (
+ AppsResource,
+ AsyncAppsResource,
+ AppsResourceWithRawResponse,
+ AsyncAppsResourceWithRawResponse,
+ AppsResourceWithStreamingResponse,
+ AsyncAppsResourceWithStreamingResponse,
+)
+from .chat import (
+ ChatResource,
+ AsyncChatResource,
+ ChatResourceWithRawResponse,
+ AsyncChatResourceWithRawResponse,
+ ChatResourceWithStreamingResponse,
+ AsyncChatResourceWithStreamingResponse,
+)
+from .agents import (
+ AgentsResource,
+ AsyncAgentsResource,
+ AgentsResourceWithRawResponse,
+ AsyncAgentsResourceWithRawResponse,
+ AgentsResourceWithStreamingResponse,
+ AsyncAgentsResourceWithStreamingResponse,
+)
+from .images import (
+ ImagesResource,
+ AsyncImagesResource,
+ ImagesResourceWithRawResponse,
+ AsyncImagesResourceWithRawResponse,
+ ImagesResourceWithStreamingResponse,
+ AsyncImagesResourceWithStreamingResponse,
+)
+from .models import (
+ ModelsResource,
+ AsyncModelsResource,
+ ModelsResourceWithRawResponse,
+ AsyncModelsResourceWithRawResponse,
+ ModelsResourceWithStreamingResponse,
+ AsyncModelsResourceWithStreamingResponse,
+)
+from .billing import (
+ BillingResource,
+ AsyncBillingResource,
+ BillingResourceWithRawResponse,
+ AsyncBillingResourceWithRawResponse,
+ BillingResourceWithStreamingResponse,
+ AsyncBillingResourceWithStreamingResponse,
+)
+from .regions import (
+ RegionsResource,
+ AsyncRegionsResource,
+ RegionsResourceWithRawResponse,
+ AsyncRegionsResourceWithRawResponse,
+ RegionsResourceWithStreamingResponse,
+ AsyncRegionsResourceWithStreamingResponse,
+)
+from .retrieve import (
+ RetrieveResource,
+ AsyncRetrieveResource,
+ RetrieveResourceWithRawResponse,
+ AsyncRetrieveResourceWithRawResponse,
+ RetrieveResourceWithStreamingResponse,
+ AsyncRetrieveResourceWithStreamingResponse,
+)
+from .databases import (
+ DatabasesResource,
+ AsyncDatabasesResource,
+ DatabasesResourceWithRawResponse,
+ AsyncDatabasesResourceWithRawResponse,
+ DatabasesResourceWithStreamingResponse,
+ AsyncDatabasesResourceWithStreamingResponse,
+)
+from .inference import (
+ InferenceResource,
+ AsyncInferenceResource,
+ InferenceResourceWithRawResponse,
+ AsyncInferenceResourceWithRawResponse,
+ InferenceResourceWithStreamingResponse,
+ AsyncInferenceResourceWithStreamingResponse,
+)
+from .responses import (
+ ResponsesResource,
+ AsyncResponsesResource,
+ ResponsesResourceWithRawResponse,
+ AsyncResponsesResourceWithRawResponse,
+ ResponsesResourceWithStreamingResponse,
+ AsyncResponsesResourceWithStreamingResponse,
+)
+from .gpu_droplets import (
+ GPUDropletsResource,
+ AsyncGPUDropletsResource,
+ GPUDropletsResourceWithRawResponse,
+ AsyncGPUDropletsResourceWithRawResponse,
+ GPUDropletsResourceWithStreamingResponse,
+ AsyncGPUDropletsResourceWithStreamingResponse,
+)
+from .knowledge_bases import (
+ KnowledgeBasesResource,
+ AsyncKnowledgeBasesResource,
+ KnowledgeBasesResourceWithRawResponse,
+ AsyncKnowledgeBasesResourceWithRawResponse,
+ KnowledgeBasesResourceWithStreamingResponse,
+ AsyncKnowledgeBasesResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "AgentsResource",
+ "AsyncAgentsResource",
+ "AgentsResourceWithRawResponse",
+ "AsyncAgentsResourceWithRawResponse",
+ "AgentsResourceWithStreamingResponse",
+ "AsyncAgentsResourceWithStreamingResponse",
+ "ChatResource",
+ "AsyncChatResource",
+ "ChatResourceWithRawResponse",
+ "AsyncChatResourceWithRawResponse",
+ "ChatResourceWithStreamingResponse",
+ "AsyncChatResourceWithStreamingResponse",
+ "ImagesResource",
+ "AsyncImagesResource",
+ "ImagesResourceWithRawResponse",
+ "AsyncImagesResourceWithRawResponse",
+ "ImagesResourceWithStreamingResponse",
+ "AsyncImagesResourceWithStreamingResponse",
+ "ResponsesResource",
+ "AsyncResponsesResource",
+ "ResponsesResourceWithRawResponse",
+ "AsyncResponsesResourceWithRawResponse",
+ "ResponsesResourceWithStreamingResponse",
+ "AsyncResponsesResourceWithStreamingResponse",
+ "GPUDropletsResource",
+ "AsyncGPUDropletsResource",
+ "GPUDropletsResourceWithRawResponse",
+ "AsyncGPUDropletsResourceWithRawResponse",
+ "GPUDropletsResourceWithStreamingResponse",
+ "AsyncGPUDropletsResourceWithStreamingResponse",
+ "InferenceResource",
+ "AsyncInferenceResource",
+ "InferenceResourceWithRawResponse",
+ "AsyncInferenceResourceWithRawResponse",
+ "InferenceResourceWithStreamingResponse",
+ "AsyncInferenceResourceWithStreamingResponse",
+ "KnowledgeBasesResource",
+ "AsyncKnowledgeBasesResource",
+ "KnowledgeBasesResourceWithRawResponse",
+ "AsyncKnowledgeBasesResourceWithRawResponse",
+ "KnowledgeBasesResourceWithStreamingResponse",
+ "AsyncKnowledgeBasesResourceWithStreamingResponse",
+ "ModelsResource",
+ "AsyncModelsResource",
+ "ModelsResourceWithRawResponse",
+ "AsyncModelsResourceWithRawResponse",
+ "ModelsResourceWithStreamingResponse",
+ "AsyncModelsResourceWithStreamingResponse",
+ "RegionsResource",
+ "AsyncRegionsResource",
+ "RegionsResourceWithRawResponse",
+ "AsyncRegionsResourceWithRawResponse",
+ "RegionsResourceWithStreamingResponse",
+ "AsyncRegionsResourceWithStreamingResponse",
+ "DatabasesResource",
+ "AsyncDatabasesResource",
+ "DatabasesResourceWithRawResponse",
+ "AsyncDatabasesResourceWithRawResponse",
+ "DatabasesResourceWithStreamingResponse",
+ "AsyncDatabasesResourceWithStreamingResponse",
+ "NfsResource",
+ "AsyncNfsResource",
+ "NfsResourceWithRawResponse",
+ "AsyncNfsResourceWithRawResponse",
+ "NfsResourceWithStreamingResponse",
+ "AsyncNfsResourceWithStreamingResponse",
+ "RetrieveResource",
+ "AsyncRetrieveResource",
+ "RetrieveResourceWithRawResponse",
+ "AsyncRetrieveResourceWithRawResponse",
+ "RetrieveResourceWithStreamingResponse",
+ "AsyncRetrieveResourceWithStreamingResponse",
+ "AppsResource",
+ "AsyncAppsResource",
+ "AppsResourceWithRawResponse",
+ "AsyncAppsResourceWithRawResponse",
+ "AppsResourceWithStreamingResponse",
+ "AsyncAppsResourceWithStreamingResponse",
+ "BillingResource",
+ "AsyncBillingResource",
+ "BillingResourceWithRawResponse",
+ "AsyncBillingResourceWithRawResponse",
+ "BillingResourceWithStreamingResponse",
+ "AsyncBillingResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/agents/__init__.py b/src/gradient/resources/agents/__init__.py
new file mode 100644
index 00000000..51075283
--- /dev/null
+++ b/src/gradient/resources/agents/__init__.py
@@ -0,0 +1,159 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chat import (
+ ChatResource,
+ AsyncChatResource,
+ ChatResourceWithRawResponse,
+ AsyncChatResourceWithRawResponse,
+ ChatResourceWithStreamingResponse,
+ AsyncChatResourceWithStreamingResponse,
+)
+from .agents import (
+ AgentsResource,
+ AsyncAgentsResource,
+ AgentsResourceWithRawResponse,
+ AsyncAgentsResourceWithRawResponse,
+ AgentsResourceWithStreamingResponse,
+ AsyncAgentsResourceWithStreamingResponse,
+)
+from .routes import (
+ RoutesResource,
+ AsyncRoutesResource,
+ RoutesResourceWithRawResponse,
+ AsyncRoutesResourceWithRawResponse,
+ RoutesResourceWithStreamingResponse,
+ AsyncRoutesResourceWithStreamingResponse,
+)
+from .api_keys import (
+ APIKeysResource,
+ AsyncAPIKeysResource,
+ APIKeysResourceWithRawResponse,
+ AsyncAPIKeysResourceWithRawResponse,
+ APIKeysResourceWithStreamingResponse,
+ AsyncAPIKeysResourceWithStreamingResponse,
+)
+from .versions import (
+ VersionsResource,
+ AsyncVersionsResource,
+ VersionsResourceWithRawResponse,
+ AsyncVersionsResourceWithRawResponse,
+ VersionsResourceWithStreamingResponse,
+ AsyncVersionsResourceWithStreamingResponse,
+)
+from .functions import (
+ FunctionsResource,
+ AsyncFunctionsResource,
+ FunctionsResourceWithRawResponse,
+ AsyncFunctionsResourceWithRawResponse,
+ FunctionsResourceWithStreamingResponse,
+ AsyncFunctionsResourceWithStreamingResponse,
+)
+from .evaluation_runs import (
+ EvaluationRunsResource,
+ AsyncEvaluationRunsResource,
+ EvaluationRunsResourceWithRawResponse,
+ AsyncEvaluationRunsResourceWithRawResponse,
+ EvaluationRunsResourceWithStreamingResponse,
+ AsyncEvaluationRunsResourceWithStreamingResponse,
+)
+from .knowledge_bases import (
+ KnowledgeBasesResource,
+ AsyncKnowledgeBasesResource,
+ KnowledgeBasesResourceWithRawResponse,
+ AsyncKnowledgeBasesResourceWithRawResponse,
+ KnowledgeBasesResourceWithStreamingResponse,
+ AsyncKnowledgeBasesResourceWithStreamingResponse,
+)
+from .evaluation_metrics import (
+ EvaluationMetricsResource,
+ AsyncEvaluationMetricsResource,
+ EvaluationMetricsResourceWithRawResponse,
+ AsyncEvaluationMetricsResourceWithRawResponse,
+ EvaluationMetricsResourceWithStreamingResponse,
+ AsyncEvaluationMetricsResourceWithStreamingResponse,
+)
+from .evaluation_datasets import (
+ EvaluationDatasetsResource,
+ AsyncEvaluationDatasetsResource,
+ EvaluationDatasetsResourceWithRawResponse,
+ AsyncEvaluationDatasetsResourceWithRawResponse,
+ EvaluationDatasetsResourceWithStreamingResponse,
+ AsyncEvaluationDatasetsResourceWithStreamingResponse,
+)
+from .evaluation_test_cases import (
+ EvaluationTestCasesResource,
+ AsyncEvaluationTestCasesResource,
+ EvaluationTestCasesResourceWithRawResponse,
+ AsyncEvaluationTestCasesResourceWithRawResponse,
+ EvaluationTestCasesResourceWithStreamingResponse,
+ AsyncEvaluationTestCasesResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "APIKeysResource",
+ "AsyncAPIKeysResource",
+ "APIKeysResourceWithRawResponse",
+ "AsyncAPIKeysResourceWithRawResponse",
+ "APIKeysResourceWithStreamingResponse",
+ "AsyncAPIKeysResourceWithStreamingResponse",
+ "ChatResource",
+ "AsyncChatResource",
+ "ChatResourceWithRawResponse",
+ "AsyncChatResourceWithRawResponse",
+ "ChatResourceWithStreamingResponse",
+ "AsyncChatResourceWithStreamingResponse",
+ "EvaluationMetricsResource",
+ "AsyncEvaluationMetricsResource",
+ "EvaluationMetricsResourceWithRawResponse",
+ "AsyncEvaluationMetricsResourceWithRawResponse",
+ "EvaluationMetricsResourceWithStreamingResponse",
+ "AsyncEvaluationMetricsResourceWithStreamingResponse",
+ "EvaluationRunsResource",
+ "AsyncEvaluationRunsResource",
+ "EvaluationRunsResourceWithRawResponse",
+ "AsyncEvaluationRunsResourceWithRawResponse",
+ "EvaluationRunsResourceWithStreamingResponse",
+ "AsyncEvaluationRunsResourceWithStreamingResponse",
+ "EvaluationTestCasesResource",
+ "AsyncEvaluationTestCasesResource",
+ "EvaluationTestCasesResourceWithRawResponse",
+ "AsyncEvaluationTestCasesResourceWithRawResponse",
+ "EvaluationTestCasesResourceWithStreamingResponse",
+ "AsyncEvaluationTestCasesResourceWithStreamingResponse",
+ "EvaluationDatasetsResource",
+ "AsyncEvaluationDatasetsResource",
+ "EvaluationDatasetsResourceWithRawResponse",
+ "AsyncEvaluationDatasetsResourceWithRawResponse",
+ "EvaluationDatasetsResourceWithStreamingResponse",
+ "AsyncEvaluationDatasetsResourceWithStreamingResponse",
+ "FunctionsResource",
+ "AsyncFunctionsResource",
+ "FunctionsResourceWithRawResponse",
+ "AsyncFunctionsResourceWithRawResponse",
+ "FunctionsResourceWithStreamingResponse",
+ "AsyncFunctionsResourceWithStreamingResponse",
+ "VersionsResource",
+ "AsyncVersionsResource",
+ "VersionsResourceWithRawResponse",
+ "AsyncVersionsResourceWithRawResponse",
+ "VersionsResourceWithStreamingResponse",
+ "AsyncVersionsResourceWithStreamingResponse",
+ "KnowledgeBasesResource",
+ "AsyncKnowledgeBasesResource",
+ "KnowledgeBasesResourceWithRawResponse",
+ "AsyncKnowledgeBasesResourceWithRawResponse",
+ "KnowledgeBasesResourceWithStreamingResponse",
+ "AsyncKnowledgeBasesResourceWithStreamingResponse",
+ "RoutesResource",
+ "AsyncRoutesResource",
+ "RoutesResourceWithRawResponse",
+ "AsyncRoutesResourceWithRawResponse",
+ "RoutesResourceWithStreamingResponse",
+ "AsyncRoutesResourceWithStreamingResponse",
+ "AgentsResource",
+ "AsyncAgentsResource",
+ "AgentsResourceWithRawResponse",
+ "AsyncAgentsResourceWithRawResponse",
+ "AgentsResourceWithStreamingResponse",
+ "AsyncAgentsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/agents/agents.py b/src/gradient/resources/agents/agents.py
new file mode 100644
index 00000000..e6790b29
--- /dev/null
+++ b/src/gradient/resources/agents/agents.py
@@ -0,0 +1,1764 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from __future__ import annotations
+
+import time
+
+import httpx
+
+from .routes import (
+ RoutesResource,
+ AsyncRoutesResource,
+ RoutesResourceWithRawResponse,
+ AsyncRoutesResourceWithRawResponse,
+ RoutesResourceWithStreamingResponse,
+ AsyncRoutesResourceWithStreamingResponse,
+)
+from ...types import (
+ APIRetrievalMethod,
+ APIDeploymentVisibility,
+ agent_list_params,
+ agent_create_params,
+ agent_update_params,
+ agent_update_status_params,
+ agent_retrieve_usage_params,
+)
+from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from .api_keys import (
+ APIKeysResource,
+ AsyncAPIKeysResource,
+ APIKeysResourceWithRawResponse,
+ AsyncAPIKeysResourceWithRawResponse,
+ APIKeysResourceWithStreamingResponse,
+ AsyncAPIKeysResourceWithStreamingResponse,
+)
+from .versions import (
+ VersionsResource,
+ AsyncVersionsResource,
+ VersionsResourceWithRawResponse,
+ AsyncVersionsResourceWithRawResponse,
+ VersionsResourceWithStreamingResponse,
+ AsyncVersionsResourceWithStreamingResponse,
+)
+from ..._compat import cached_property
+from .chat.chat import (
+ ChatResource,
+ AsyncChatResource,
+ ChatResourceWithRawResponse,
+ AsyncChatResourceWithRawResponse,
+ ChatResourceWithStreamingResponse,
+ AsyncChatResourceWithStreamingResponse,
+)
+from .functions import (
+ FunctionsResource,
+ AsyncFunctionsResource,
+ FunctionsResourceWithRawResponse,
+ AsyncFunctionsResourceWithRawResponse,
+ FunctionsResourceWithStreamingResponse,
+ AsyncFunctionsResourceWithStreamingResponse,
+)
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from .evaluation_runs import (
+ EvaluationRunsResource,
+ AsyncEvaluationRunsResource,
+ EvaluationRunsResourceWithRawResponse,
+ AsyncEvaluationRunsResourceWithRawResponse,
+ EvaluationRunsResourceWithStreamingResponse,
+ AsyncEvaluationRunsResourceWithStreamingResponse,
+)
+from .knowledge_bases import (
+ KnowledgeBasesResource,
+ AsyncKnowledgeBasesResource,
+ KnowledgeBasesResourceWithRawResponse,
+ AsyncKnowledgeBasesResourceWithRawResponse,
+ KnowledgeBasesResourceWithStreamingResponse,
+ AsyncKnowledgeBasesResourceWithStreamingResponse,
+)
+from .evaluation_datasets import (
+ EvaluationDatasetsResource,
+ AsyncEvaluationDatasetsResource,
+ EvaluationDatasetsResourceWithRawResponse,
+ AsyncEvaluationDatasetsResourceWithRawResponse,
+ EvaluationDatasetsResourceWithStreamingResponse,
+ AsyncEvaluationDatasetsResourceWithStreamingResponse,
+)
+from .evaluation_test_cases import (
+ EvaluationTestCasesResource,
+ AsyncEvaluationTestCasesResource,
+ EvaluationTestCasesResourceWithRawResponse,
+ AsyncEvaluationTestCasesResourceWithRawResponse,
+ EvaluationTestCasesResourceWithStreamingResponse,
+ AsyncEvaluationTestCasesResourceWithStreamingResponse,
+)
+from ...types.agent_list_response import AgentListResponse
+from ...types.api_retrieval_method import APIRetrievalMethod
+from ...types.agent_create_response import AgentCreateResponse
+from ...types.agent_delete_response import AgentDeleteResponse
+from ...types.agent_update_response import AgentUpdateResponse
+from ...types.agent_retrieve_response import AgentRetrieveResponse
+from ...types.api_deployment_visibility import APIDeploymentVisibility
+from ...types.agent_update_status_response import AgentUpdateStatusResponse
+from ...types.agent_retrieve_usage_response import AgentRetrieveUsageResponse
+from .evaluation_metrics.evaluation_metrics import (
+ EvaluationMetricsResource,
+ AsyncEvaluationMetricsResource,
+ EvaluationMetricsResourceWithRawResponse,
+ AsyncEvaluationMetricsResourceWithRawResponse,
+ EvaluationMetricsResourceWithStreamingResponse,
+ AsyncEvaluationMetricsResourceWithStreamingResponse,
+)
+
+__all__ = ["AgentsResource", "AsyncAgentsResource"]
+
+
+class AgentsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def api_keys(self) -> APIKeysResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return APIKeysResource(self._client)
+
+ @cached_property
+ def chat(self) -> ChatResource:
+ return ChatResource(self._client)
+
+ @cached_property
+ def evaluation_metrics(self) -> EvaluationMetricsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationMetricsResource(self._client)
+
+ @cached_property
+ def evaluation_runs(self) -> EvaluationRunsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationRunsResource(self._client)
+
+ @cached_property
+ def evaluation_test_cases(self) -> EvaluationTestCasesResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationTestCasesResource(self._client)
+
+ @cached_property
+ def evaluation_datasets(self) -> EvaluationDatasetsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationDatasetsResource(self._client)
+
+ @cached_property
+ def functions(self) -> FunctionsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return FunctionsResource(self._client)
+
+ @cached_property
+ def versions(self) -> VersionsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return VersionsResource(self._client)
+
+ @cached_property
+ def knowledge_bases(self) -> KnowledgeBasesResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KnowledgeBasesResource(self._client)
+
+ @cached_property
+ def routes(self) -> RoutesResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return RoutesResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AgentsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AgentsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AgentsResourceWithStreamingResponse(self)
+
    def create(
        self,
        *,
        anthropic_key_uuid: str | Omit = omit,
        description: str | Omit = omit,
        instruction: str | Omit = omit,
        knowledge_base_uuid: SequenceNotStr[str] | Omit = omit,
        model_provider_key_uuid: str | Omit = omit,
        model_uuid: str | Omit = omit,
        name: str | Omit = omit,
        openai_key_uuid: str | Omit = omit,
        project_id: str | Omit = omit,
        region: str | Omit = omit,
        tags: SequenceNotStr[str] | Omit = omit,
        workspace_uuid: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentCreateResponse:
        """To create a new agent, send a POST request to `/v2/gen-ai/agents`.

        The response
        body contains a JSON object with the newly created agent object.

        Args:
          anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models

          description: A text description of the agent, not used in inference

          instruction: Agent instruction. Instructions help your agent to perform its job effectively.
              See
              [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
              for best practices.

          knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent

          model_provider_key_uuid: Optional Model Provider uuid for use with provider models

          model_uuid: Identifier for the foundation model.

          name: Agent name

          openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models

          project_id: The id of the DigitalOcean project this agent will belong to

          region: The DigitalOcean region to deploy your agent in

          tags: Agent tag to organize related resources

          workspace_uuid: Identifier for the workspace

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            "/v2/gen-ai/agents"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/agents",
            # Omitted (`omit`) fields are dropped from the request body by the transform.
            body=maybe_transform(
                {
                    "anthropic_key_uuid": anthropic_key_uuid,
                    "description": description,
                    "instruction": instruction,
                    "knowledge_base_uuid": knowledge_base_uuid,
                    "model_provider_key_uuid": model_provider_key_uuid,
                    "model_uuid": model_uuid,
                    "name": name,
                    "openai_key_uuid": openai_key_uuid,
                    "project_id": project_id,
                    "region": region,
                    "tags": tags,
                    "workspace_uuid": workspace_uuid,
                },
                agent_create_params.AgentCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentCreateResponse,
        )
+
    def retrieve(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentRetrieveResponse:
        """To retrieve details of an agent, send a GET request to `/v2/gen-ai/agents/{uuid}`.

        The
        response body is a JSON object containing the agent.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against producing a malformed URL (e.g. "/v2/gen-ai/agents/").
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return self._get(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentRetrieveResponse,
        )
+
    def update(
        self,
        path_uuid: str,
        *,
        agent_log_insights_enabled: bool | Omit = omit,
        allowed_domains: SequenceNotStr[str] | Omit = omit,
        anthropic_key_uuid: str | Omit = omit,
        conversation_logs_enabled: bool | Omit = omit,
        description: str | Omit = omit,
        instruction: str | Omit = omit,
        k: int | Omit = omit,
        max_tokens: int | Omit = omit,
        model_provider_key_uuid: str | Omit = omit,
        model_uuid: str | Omit = omit,
        name: str | Omit = omit,
        openai_key_uuid: str | Omit = omit,
        project_id: str | Omit = omit,
        provide_citations: bool | Omit = omit,
        retrieval_method: APIRetrievalMethod | Omit = omit,
        tags: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_p: float | Omit = omit,
        body_uuid: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentUpdateResponse:
        """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`.

        The
        response body is a JSON object containing the agent.

        Args:
          allowed_domains: Optional list of allowed domains for the chatbot - Must use fully qualified
              domain name (FQDN) such as https://example.com

          anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models

          conversation_logs_enabled: Optional update of conversation logs enabled

          description: Agent description

          instruction: Agent instruction. Instructions help your agent to perform its job effectively.
              See
              [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
              for best practices.

          k: How many results should be considered from an attached knowledge base

          max_tokens: Specifies the maximum number of tokens the model can process in a single input
              or output, set as a number between 1 and 512. This determines the length of each
              response.

          model_provider_key_uuid: Optional Model Provider uuid for use with provider models

          model_uuid: Identifier for the foundation model.

          name: Agent name

          openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models

          project_id: The id of the DigitalOcean project this agent will belong to

          retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown
              - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite
              - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back
              - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries
              - RETRIEVAL_METHOD_NONE: The retrieval method is none

          tags: A set of arbitrary tags to organize your agent

          temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower
              values produce more predictable and conservative responses, while higher values
              encourage creativity and variation.

          top_p: Defines the cumulative probability threshold for word selection, specified as a
              number between 0 and 1. Higher values allow for more diverse outputs, while
              lower values ensure focused and coherent responses.

          body_uuid: Unique agent id

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `path_uuid` identifies the agent in the URL; `body_uuid` is a separate
        # field carried in the request body.
        if not path_uuid:
            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
        return self._put(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{path_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}",
            # Omitted (`omit`) fields are dropped from the request body by the transform.
            body=maybe_transform(
                {
                    "agent_log_insights_enabled": agent_log_insights_enabled,
                    "allowed_domains": allowed_domains,
                    "anthropic_key_uuid": anthropic_key_uuid,
                    "conversation_logs_enabled": conversation_logs_enabled,
                    "description": description,
                    "instruction": instruction,
                    "k": k,
                    "max_tokens": max_tokens,
                    "model_provider_key_uuid": model_provider_key_uuid,
                    "model_uuid": model_uuid,
                    "name": name,
                    "openai_key_uuid": openai_key_uuid,
                    "project_id": project_id,
                    "provide_citations": provide_citations,
                    "retrieval_method": retrieval_method,
                    "tags": tags,
                    "temperature": temperature,
                    "top_p": top_p,
                    "body_uuid": body_uuid,
                },
                agent_update_params.AgentUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentUpdateResponse,
        )
+
    def list(
        self,
        *,
        only_deployed: bool | Omit = omit,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentListResponse:
        """
        To list all agents, send a GET request to `/v2/gen-ai/agents`.

        Args:
          only_deployed: Only list agents that are deployed.

          page: Page number.

          per_page: Items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            "/v2/gen-ai/agents"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/agents",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Pagination/filter params are sent as query-string values; omitted
                # (`omit`) entries are dropped by the transform.
                query=maybe_transform(
                    {
                        "only_deployed": only_deployed,
                        "page": page,
                        "per_page": per_page,
                    },
                    agent_list_params.AgentListParams,
                ),
            ),
            cast_to=AgentListResponse,
        )
+
    def delete(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentDeleteResponse:
        """
        To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against producing a malformed URL (e.g. "/v2/gen-ai/agents/").
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return self._delete(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentDeleteResponse,
        )
+
    def retrieve_usage(
        self,
        uuid: str,
        *,
        start: str | Omit = omit,
        stop: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentRetrieveUsageResponse:
        """
        To get agent usage, send a GET request to `/v2/gen-ai/agents/{uuid}/usage`.
        Returns usage metrics for the specified agent within the provided time range.

        Args:
          start: Return all usage data from this date.

          stop: Return all usage data up to this date, if omitted, will return up to the current
              date.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against producing a malformed URL (e.g. ".../agents//usage").
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return self._get(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{uuid}/usage"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/usage",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Time-range filters are sent as query-string values; omitted
                # (`omit`) entries are dropped by the transform.
                query=maybe_transform(
                    {
                        "start": start,
                        "stop": stop,
                    },
                    agent_retrieve_usage_params.AgentRetrieveUsageParams,
                ),
            ),
            cast_to=AgentRetrieveUsageResponse,
        )
+
    def update_status(
        self,
        path_uuid: str,
        *,
        body_uuid: str | Omit = omit,
        visibility: APIDeploymentVisibility | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentUpdateStatusResponse:
        """Check whether an agent is public or private.

        To update the agent status, send a
        PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`.

        Args:
          body_uuid: Unique id

          visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown
              - VISIBILITY_DISABLED: The deployment is disabled and will no longer service
                requests
              - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state
              - VISIBILITY_PUBLIC: The deployment is public and will service requests from the
                public internet
              - VISIBILITY_PRIVATE: The deployment is private and will only service requests
                from other agents, or through API keys

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `path_uuid` identifies the agent in the URL; `body_uuid` is a separate
        # field carried in the request body.
        if not path_uuid:
            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
        return self._put(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/deployment_visibility",
            body=maybe_transform(
                {
                    "body_uuid": body_uuid,
                    "visibility": visibility,
                },
                agent_update_status_params.AgentUpdateStatusParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentUpdateStatusResponse,
        )
+
    def wait_until_ready(
        self,
        uuid: str,
        *,
        timeout: float = 300.0,
        poll_interval: float = 5.0,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
    ) -> AgentRetrieveResponse:
        """Wait for an agent to be ready (deployment status is STATUS_RUNNING).

        This method polls the agent status until it reaches STATUS_RUNNING or a terminal
        error state. It handles timeout and deployment failures automatically.

        Note that `timeout` here is the overall polling deadline, not a per-request
        HTTP timeout; individual `retrieve` calls use the client-level default.

        Args:
          uuid: The unique identifier of the agent to wait for

          timeout: Maximum time to wait in seconds (default: 300 seconds / 5 minutes)

          poll_interval: Time to wait between status checks in seconds (default: 5 seconds)

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

        Returns:
          AgentRetrieveResponse: The agent response when it reaches STATUS_RUNNING

        Raises:
          AgentDeploymentError: If the agent deployment fails (STATUS_FAILED,
            STATUS_UNDEPLOYMENT_FAILED, or STATUS_DELETED)
          AgentDeploymentTimeoutError: If the agent doesn't reach STATUS_RUNNING
            within the timeout period
          ValueError: If uuid is empty
        """
        # Imported locally rather than at module top level — presumably to avoid a
        # circular import with the exceptions module; TODO confirm.
        from ..._exceptions import AgentDeploymentError, AgentDeploymentTimeoutError

        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")

        start_time = time.time()

        # Poll-first loop: at least one retrieve is always issued, even when
        # `timeout` is 0 or negative.
        while True:
            agent_response = self.retrieve(
                uuid,
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
            )

            # Check if agent and deployment exist
            if agent_response.agent and agent_response.agent.deployment:
                status = agent_response.agent.deployment.status

                # Success case
                if status == "STATUS_RUNNING":
                    return agent_response

                # Failure cases
                if status in ("STATUS_FAILED", "STATUS_UNDEPLOYMENT_FAILED", "STATUS_DELETED"):
                    raise AgentDeploymentError(
                        f"Agent deployment failed with status: {status}",
                        status=status,
                    )

            # Check timeout (after the retrieve, so the reported "current status"
            # reflects the most recent poll).
            elapsed_time = time.time() - start_time
            if elapsed_time >= timeout:
                current_status = (
                    agent_response.agent.deployment.status
                    if agent_response.agent and agent_response.agent.deployment
                    else "UNKNOWN"
                )
                raise AgentDeploymentTimeoutError(
                    f"Agent did not reach STATUS_RUNNING within {timeout} seconds. Current status: {current_status}",
                    agent_id=uuid,
                )

            # Wait before polling again.
            # NOTE(review): the sleep is a fixed `poll_interval`, so the deadline can
            # be overshot by up to one interval plus one request's latency.
            time.sleep(poll_interval)
+
+
+class AsyncAgentsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
    @cached_property
    def api_keys(self) -> AsyncAPIKeysResource:
        """Async sub-resource for agent API key operations, bound to this client."""
        return AsyncAPIKeysResource(self._client)

    @cached_property
    def chat(self) -> AsyncChatResource:
        """Async sub-resource for agent chat operations, bound to this client."""
        return AsyncChatResource(self._client)

    @cached_property
    def evaluation_metrics(self) -> AsyncEvaluationMetricsResource:
        """Async sub-resource for evaluation-metric operations, bound to this client."""
        return AsyncEvaluationMetricsResource(self._client)

    @cached_property
    def evaluation_runs(self) -> AsyncEvaluationRunsResource:
        """Async sub-resource for evaluation-run operations, bound to this client."""
        return AsyncEvaluationRunsResource(self._client)

    @cached_property
    def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResource:
        """Async sub-resource for evaluation test-case operations, bound to this client."""
        return AsyncEvaluationTestCasesResource(self._client)

    @cached_property
    def evaluation_datasets(self) -> AsyncEvaluationDatasetsResource:
        """Async sub-resource for evaluation-dataset operations, bound to this client."""
        return AsyncEvaluationDatasetsResource(self._client)

    @cached_property
    def functions(self) -> AsyncFunctionsResource:
        """Async sub-resource for agent function-route operations, bound to this client."""
        return AsyncFunctionsResource(self._client)

    @cached_property
    def versions(self) -> AsyncVersionsResource:
        """Async sub-resource for agent version operations, bound to this client."""
        return AsyncVersionsResource(self._client)

    @cached_property
    def knowledge_bases(self) -> AsyncKnowledgeBasesResource:
        """Async sub-resource for agent knowledge-base operations, bound to this client."""
        return AsyncKnowledgeBasesResource(self._client)

    @cached_property
    def routes(self) -> AsyncRoutesResource:
        """Async sub-resource for agent route operations, bound to this client."""
        return AsyncRoutesResource(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
        """
        return AsyncAgentsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
        """
        return AsyncAgentsResourceWithStreamingResponse(self)
+
    async def create(
        self,
        *,
        anthropic_key_uuid: str | Omit = omit,
        description: str | Omit = omit,
        instruction: str | Omit = omit,
        knowledge_base_uuid: SequenceNotStr[str] | Omit = omit,
        model_provider_key_uuid: str | Omit = omit,
        model_uuid: str | Omit = omit,
        name: str | Omit = omit,
        openai_key_uuid: str | Omit = omit,
        project_id: str | Omit = omit,
        region: str | Omit = omit,
        tags: SequenceNotStr[str] | Omit = omit,
        workspace_uuid: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentCreateResponse:
        """To create a new agent, send a POST request to `/v2/gen-ai/agents`.

        The response
        body contains a JSON object with the newly created agent object.

        Args:
          anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models

          description: A text description of the agent, not used in inference

          instruction: Agent instruction. Instructions help your agent to perform its job effectively.
              See
              [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
              for best practices.

          knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent

          model_provider_key_uuid: Optional Model Provider uuid for use with provider models

          model_uuid: Identifier for the foundation model.

          name: Agent name

          openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models

          project_id: The id of the DigitalOcean project this agent will belong to

          region: The DigitalOcean region to deploy your agent in

          tags: Agent tag to organize related resources

          workspace_uuid: Identifier for the workspace

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            "/v2/gen-ai/agents"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/agents",
            # Omitted (`omit`) fields are dropped from the request body by the transform.
            body=await async_maybe_transform(
                {
                    "anthropic_key_uuid": anthropic_key_uuid,
                    "description": description,
                    "instruction": instruction,
                    "knowledge_base_uuid": knowledge_base_uuid,
                    "model_provider_key_uuid": model_provider_key_uuid,
                    "model_uuid": model_uuid,
                    "name": name,
                    "openai_key_uuid": openai_key_uuid,
                    "project_id": project_id,
                    "region": region,
                    "tags": tags,
                    "workspace_uuid": workspace_uuid,
                },
                agent_create_params.AgentCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentCreateResponse,
        )
+
    async def retrieve(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentRetrieveResponse:
        """To retrieve details of an agent, send a GET request to `/v2/gen-ai/agents/{uuid}`.

        The
        response body is a JSON object containing the agent.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against producing a malformed URL (e.g. "/v2/gen-ai/agents/").
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return await self._get(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentRetrieveResponse,
        )
+
    async def update(
        self,
        path_uuid: str,
        *,
        agent_log_insights_enabled: bool | Omit = omit,
        allowed_domains: SequenceNotStr[str] | Omit = omit,
        anthropic_key_uuid: str | Omit = omit,
        conversation_logs_enabled: bool | Omit = omit,
        description: str | Omit = omit,
        instruction: str | Omit = omit,
        k: int | Omit = omit,
        max_tokens: int | Omit = omit,
        model_provider_key_uuid: str | Omit = omit,
        model_uuid: str | Omit = omit,
        name: str | Omit = omit,
        openai_key_uuid: str | Omit = omit,
        project_id: str | Omit = omit,
        provide_citations: bool | Omit = omit,
        retrieval_method: APIRetrievalMethod | Omit = omit,
        tags: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_p: float | Omit = omit,
        body_uuid: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentUpdateResponse:
        """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`.

        The
        response body is a JSON object containing the agent.

        Args:
          allowed_domains: Optional list of allowed domains for the chatbot - Must use fully qualified
              domain name (FQDN) such as https://example.com

          anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models

          conversation_logs_enabled: Optional update of conversation logs enabled

          description: Agent description

          instruction: Agent instruction. Instructions help your agent to perform its job effectively.
              See
              [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
              for best practices.

          k: How many results should be considered from an attached knowledge base

          max_tokens: Specifies the maximum number of tokens the model can process in a single input
              or output, set as a number between 1 and 512. This determines the length of each
              response.

          model_provider_key_uuid: Optional Model Provider uuid for use with provider models

          model_uuid: Identifier for the foundation model.

          name: Agent name

          openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models

          project_id: The id of the DigitalOcean project this agent will belong to

          retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown
              - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite
              - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back
              - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries
              - RETRIEVAL_METHOD_NONE: The retrieval method is none

          tags: A set of arbitrary tags to organize your agent

          temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower
              values produce more predictable and conservative responses, while higher values
              encourage creativity and variation.

          top_p: Defines the cumulative probability threshold for word selection, specified as a
              number between 0 and 1. Higher values allow for more diverse outputs, while
              lower values ensure focused and coherent responses.

          body_uuid: Unique agent id

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `path_uuid` identifies the agent in the URL; `body_uuid` is a separate
        # field carried in the request body.
        if not path_uuid:
            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
        return await self._put(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{path_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}",
            # Omitted (`omit`) fields are dropped from the request body by the transform.
            body=await async_maybe_transform(
                {
                    "agent_log_insights_enabled": agent_log_insights_enabled,
                    "allowed_domains": allowed_domains,
                    "anthropic_key_uuid": anthropic_key_uuid,
                    "conversation_logs_enabled": conversation_logs_enabled,
                    "description": description,
                    "instruction": instruction,
                    "k": k,
                    "max_tokens": max_tokens,
                    "model_provider_key_uuid": model_provider_key_uuid,
                    "model_uuid": model_uuid,
                    "name": name,
                    "openai_key_uuid": openai_key_uuid,
                    "project_id": project_id,
                    "provide_citations": provide_citations,
                    "retrieval_method": retrieval_method,
                    "tags": tags,
                    "temperature": temperature,
                    "top_p": top_p,
                    "body_uuid": body_uuid,
                },
                agent_update_params.AgentUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentUpdateResponse,
        )
+
    async def list(
        self,
        *,
        only_deployed: bool | Omit = omit,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentListResponse:
        """
        To list all agents, send a GET request to `/v2/gen-ai/agents`.

        Args:
          only_deployed: Only list agents that are deployed.

          page: Page number.

          per_page: Items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._get(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            "/v2/gen-ai/agents"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/agents",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Pagination/filter params are sent as query-string values; omitted
                # (`omit`) entries are dropped by the transform.
                query=await async_maybe_transform(
                    {
                        "only_deployed": only_deployed,
                        "page": page,
                        "per_page": per_page,
                    },
                    agent_list_params.AgentListParams,
                ),
            ),
            cast_to=AgentListResponse,
        )
+
    async def delete(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AgentDeleteResponse:
        """
        To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against producing a malformed URL (e.g. "/v2/gen-ai/agents/").
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return await self._delete(
            # Relative path when the client's base URL was overridden; otherwise
            # target the DigitalOcean API host directly.
            f"/v2/gen-ai/agents/{uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=AgentDeleteResponse,
        )
+
+    async def retrieve_usage(
+        self,
+        uuid: str,
+        *,
+        start: str | Omit = omit,
+        stop: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AgentRetrieveUsageResponse:
+        """
+        To get agent usage, send a GET request to `/v2/gen-ai/agents/{uuid}/usage`.
+        Returns usage metrics for the specified agent within the provided time range.
+
+        Args:
+          start: Return all usage data from this date.
+
+          stop: Return all usage data up to this date, if omitted, will return up to the current
+              date.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+
+        Raises:
+          ValueError: If `uuid` is an empty string.
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        # Fall back to the production DigitalOcean host unless the client was
+        # constructed with an explicit base-URL override.
+        return await self._get(
+            f"/v2/gen-ai/agents/{uuid}/usage"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/usage",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "start": start,
+                        "stop": stop,
+                    },
+                    agent_retrieve_usage_params.AgentRetrieveUsageParams,
+                ),
+            ),
+            cast_to=AgentRetrieveUsageResponse,
+        )
+
+    async def update_status(
+        self,
+        path_uuid: str,
+        *,
+        body_uuid: str | Omit = omit,
+        visibility: APIDeploymentVisibility | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AgentUpdateStatusResponse:
+        """Check whether an agent is public or private.
+
+        To update the agent status, send a
+        PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`.
+
+        Args:
+          body_uuid: Unique id
+
+          visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown
+              - VISIBILITY_DISABLED: The deployment is disabled and will no longer service
+                requests
+              - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state
+              - VISIBILITY_PUBLIC: The deployment is public and will service requests from the
+                public internet
+              - VISIBILITY_PRIVATE: The deployment is private and will only service requests
+                from other agents, or through API keys
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+
+        Raises:
+          ValueError: If `path_uuid` is an empty string.
+        """
+        if not path_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+        # Fall back to the production DigitalOcean host unless the client was
+        # constructed with an explicit base-URL override.
+        return await self._put(
+            f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/deployment_visibility",
+            body=await async_maybe_transform(
+                {
+                    "body_uuid": body_uuid,
+                    "visibility": visibility,
+                },
+                agent_update_status_params.AgentUpdateStatusParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=AgentUpdateStatusResponse,
+        )
+
+ async def wait_until_ready(
+ self,
+ uuid: str,
+ *,
+ timeout: float = 300.0,
+ poll_interval: float = 5.0,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ ) -> AgentRetrieveResponse:
+ """Wait for an agent to be ready (deployment status is STATUS_RUNNING).
+
+ This method polls the agent status until it reaches STATUS_RUNNING or a terminal
+ error state. It handles timeout and deployment failures automatically.
+
+ Args:
+ uuid: The unique identifier of the agent to wait for
+
+ timeout: Maximum time to wait in seconds (default: 300 seconds / 5 minutes)
+
+ poll_interval: Time to wait between status checks in seconds (default: 5 seconds)
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ Returns:
+ AgentRetrieveResponse: The agent response when it reaches STATUS_RUNNING
+
+ Raises:
+ AgentDeploymentError: If the agent deployment fails (STATUS_FAILED,
+ STATUS_UNDEPLOYMENT_FAILED, or STATUS_DELETED)
+ AgentDeploymentTimeoutError: If the agent doesn't reach STATUS_RUNNING
+ within the timeout period
+ ValueError: If uuid is empty
+ """
+ import asyncio
+
+ from ..._exceptions import AgentDeploymentError, AgentDeploymentTimeoutError
+
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+
+ start_time = time.time()
+
+ while True:
+ agent_response = await self.retrieve(
+ uuid,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ )
+
+ # Check if agent and deployment exist
+ if agent_response.agent and agent_response.agent.deployment:
+ status = agent_response.agent.deployment.status
+
+ # Success case
+ if status == "STATUS_RUNNING":
+ return agent_response
+
+ # Failure cases
+ if status in ("STATUS_FAILED", "STATUS_UNDEPLOYMENT_FAILED", "STATUS_DELETED"):
+ raise AgentDeploymentError(
+ f"Agent deployment failed with status: {status}",
+ status=status,
+ )
+
+ # Check timeout
+ elapsed_time = time.time() - start_time
+ if elapsed_time >= timeout:
+ current_status = (
+ agent_response.agent.deployment.status
+ if agent_response.agent and agent_response.agent.deployment
+ else "UNKNOWN"
+ )
+ raise AgentDeploymentTimeoutError(
+ f"Agent did not reach STATUS_RUNNING within {timeout} seconds. Current status: {current_status}",
+ agent_id=uuid,
+ )
+
+ # Wait before polling again
+ await asyncio.sleep(poll_interval)
+
+
+class AgentsResourceWithRawResponse:
+ def __init__(self, agents: AgentsResource) -> None:
+ self._agents = agents
+
+ self.create = to_raw_response_wrapper(
+ agents.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ agents.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ agents.update,
+ )
+ self.list = to_raw_response_wrapper(
+ agents.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ agents.delete,
+ )
+ self.retrieve_usage = to_raw_response_wrapper(
+ agents.retrieve_usage,
+ )
+ self.update_status = to_raw_response_wrapper(
+ agents.update_status,
+ )
+ self.wait_until_ready = to_raw_response_wrapper(
+ agents.wait_until_ready,
+ )
+
+ @cached_property
+ def api_keys(self) -> APIKeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return APIKeysResourceWithRawResponse(self._agents.api_keys)
+
+ @cached_property
+ def chat(self) -> ChatResourceWithRawResponse:
+ return ChatResourceWithRawResponse(self._agents.chat)
+
+ @cached_property
+ def evaluation_metrics(self) -> EvaluationMetricsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> EvaluationRunsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationRunsResourceWithRawResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationTestCasesResourceWithRawResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> EvaluationDatasetsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationDatasetsResourceWithRawResponse(self._agents.evaluation_datasets)
+
+ @cached_property
+ def functions(self) -> FunctionsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return FunctionsResourceWithRawResponse(self._agents.functions)
+
+ @cached_property
+ def versions(self) -> VersionsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return VersionsResourceWithRawResponse(self._agents.versions)
+
+ @cached_property
+ def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases)
+
+ @cached_property
+ def routes(self) -> RoutesResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return RoutesResourceWithRawResponse(self._agents.routes)
+
+
+class AsyncAgentsResourceWithRawResponse:
+ def __init__(self, agents: AsyncAgentsResource) -> None:
+ self._agents = agents
+
+ self.create = async_to_raw_response_wrapper(
+ agents.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ agents.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ agents.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ agents.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ agents.delete,
+ )
+ self.retrieve_usage = async_to_raw_response_wrapper(
+ agents.retrieve_usage,
+ )
+ self.update_status = async_to_raw_response_wrapper(
+ agents.update_status,
+ )
+ self.wait_until_ready = async_to_raw_response_wrapper(
+ agents.wait_until_ready,
+ )
+
+ @cached_property
+ def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys)
+
+ @cached_property
+ def chat(self) -> AsyncChatResourceWithRawResponse:
+ return AsyncChatResourceWithRawResponse(self._agents.chat)
+
+ @cached_property
+ def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationRunsResourceWithRawResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationTestCasesResourceWithRawResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationDatasetsResourceWithRawResponse(self._agents.evaluation_datasets)
+
+ @cached_property
+ def functions(self) -> AsyncFunctionsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncFunctionsResourceWithRawResponse(self._agents.functions)
+
+ @cached_property
+ def versions(self) -> AsyncVersionsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncVersionsResourceWithRawResponse(self._agents.versions)
+
+ @cached_property
+ def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases)
+
+ @cached_property
+ def routes(self) -> AsyncRoutesResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncRoutesResourceWithRawResponse(self._agents.routes)
+
+
+class AgentsResourceWithStreamingResponse:
+ def __init__(self, agents: AgentsResource) -> None:
+ self._agents = agents
+
+ self.create = to_streamed_response_wrapper(
+ agents.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ agents.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ agents.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ agents.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ agents.delete,
+ )
+ self.retrieve_usage = to_streamed_response_wrapper(
+ agents.retrieve_usage,
+ )
+ self.update_status = to_streamed_response_wrapper(
+ agents.update_status,
+ )
+ self.wait_until_ready = to_streamed_response_wrapper(
+ agents.wait_until_ready,
+ )
+
+ @cached_property
+ def api_keys(self) -> APIKeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return APIKeysResourceWithStreamingResponse(self._agents.api_keys)
+
+ @cached_property
+ def chat(self) -> ChatResourceWithStreamingResponse:
+ return ChatResourceWithStreamingResponse(self._agents.chat)
+
+ @cached_property
+ def evaluation_metrics(self) -> EvaluationMetricsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> EvaluationRunsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationRunsResourceWithStreamingResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationTestCasesResourceWithStreamingResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> EvaluationDatasetsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return EvaluationDatasetsResourceWithStreamingResponse(self._agents.evaluation_datasets)
+
+ @cached_property
+ def functions(self) -> FunctionsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return FunctionsResourceWithStreamingResponse(self._agents.functions)
+
+ @cached_property
+ def versions(self) -> VersionsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return VersionsResourceWithStreamingResponse(self._agents.versions)
+
+ @cached_property
+ def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases)
+
+ @cached_property
+ def routes(self) -> RoutesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return RoutesResourceWithStreamingResponse(self._agents.routes)
+
+
+class AsyncAgentsResourceWithStreamingResponse:
+ def __init__(self, agents: AsyncAgentsResource) -> None:
+ self._agents = agents
+
+ self.create = async_to_streamed_response_wrapper(
+ agents.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ agents.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ agents.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ agents.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ agents.delete,
+ )
+ self.retrieve_usage = async_to_streamed_response_wrapper(
+ agents.retrieve_usage,
+ )
+ self.update_status = async_to_streamed_response_wrapper(
+ agents.update_status,
+ )
+ self.wait_until_ready = async_to_streamed_response_wrapper(
+ agents.wait_until_ready,
+ )
+
+ @cached_property
+ def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys)
+
+ @cached_property
+ def chat(self) -> AsyncChatResourceWithStreamingResponse:
+ return AsyncChatResourceWithStreamingResponse(self._agents.chat)
+
+ @cached_property
+ def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationRunsResourceWithStreamingResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationTestCasesResourceWithStreamingResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncEvaluationDatasetsResourceWithStreamingResponse(self._agents.evaluation_datasets)
+
+ @cached_property
+ def functions(self) -> AsyncFunctionsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions)
+
+ @cached_property
+ def versions(self) -> AsyncVersionsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncVersionsResourceWithStreamingResponse(self._agents.versions)
+
+ @cached_property
+ def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases)
+
+ @cached_property
+ def routes(self) -> AsyncRoutesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncRoutesResourceWithStreamingResponse(self._agents.routes)
diff --git a/src/gradient/resources/agents/api_keys.py b/src/gradient/resources/agents/api_keys.py
new file mode 100644
index 00000000..8a045851
--- /dev/null
+++ b/src/gradient/resources/agents/api_keys.py
@@ -0,0 +1,629 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params
+from ...types.agents.api_key_list_response import APIKeyListResponse
+from ...types.agents.api_key_create_response import APIKeyCreateResponse
+from ...types.agents.api_key_delete_response import APIKeyDeleteResponse
+from ...types.agents.api_key_update_response import APIKeyUpdateResponse
+from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse
+
+__all__ = ["APIKeysResource", "AsyncAPIKeysResource"]
+
+
+class APIKeysResource(SyncAPIResource):
+    """
+    The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> APIKeysResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return APIKeysResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return APIKeysResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        path_agent_uuid: str,
+        *,
+        body_agent_uuid: str | Omit = omit,
+        name: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> APIKeyCreateResponse:
+        """
+        To create an agent API key, send a POST request to
+        `/v2/gen-ai/agents/{agent_uuid}/api_keys`.
+
+        Args:
+          body_agent_uuid: Agent id
+
+          name: A human friendly name to identify the key
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+
+        Raises:
+          ValueError: If `path_agent_uuid` is an empty string.
+        """
+        if not path_agent_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+        # Fall back to the production DigitalOcean host unless the client was
+        # constructed with an explicit base-URL override.
+        return self._post(
+            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys",
+            body=maybe_transform(
+                {
+                    "body_agent_uuid": body_agent_uuid,
+                    "name": name,
+                },
+                api_key_create_params.APIKeyCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=APIKeyCreateResponse,
+        )
+
+    def update(
+        self,
+        path_api_key_uuid: str,
+        *,
+        path_agent_uuid: str,
+        body_agent_uuid: str | Omit = omit,
+        body_api_key_uuid: str | Omit = omit,
+        name: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> APIKeyUpdateResponse:
+        """
+        To update an agent API key, send a PUT request to
+        `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`.
+
+        Args:
+          body_agent_uuid: Agent id
+
+          body_api_key_uuid: API key ID
+
+          name: Name
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+
+        Raises:
+          ValueError: If `path_agent_uuid` or `path_api_key_uuid` is an empty string.
+        """
+        if not path_agent_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+        if not path_api_key_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+        # Fall back to the production DigitalOcean host unless the client was
+        # constructed with an explicit base-URL override.
+        return self._put(
+            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}",
+            body=maybe_transform(
+                {
+                    "body_agent_uuid": body_agent_uuid,
+                    "body_api_key_uuid": body_api_key_uuid,
+                    "name": name,
+                },
+                api_key_update_params.APIKeyUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=APIKeyUpdateResponse,
+        )
+
+    def list(
+        self,
+        agent_uuid: str,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> APIKeyListResponse:
+        """
+        To list all agent API keys, send a GET request to
+        `/v2/gen-ai/agents/{agent_uuid}/api_keys`.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+
+        Raises:
+          ValueError: If `agent_uuid` is an empty string.
+        """
+        if not agent_uuid:
+            raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+        # Fall back to the production DigitalOcean host unless the client was
+        # constructed with an explicit base-URL override.
+        return self._get(
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    api_key_list_params.APIKeyListParams,
+                ),
+            ),
+            cast_to=APIKeyListResponse,
+        )
+
+    def delete(
+        self,
+        api_key_uuid: str,
+        *,
+        agent_uuid: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> APIKeyDeleteResponse:
+        """
+        To delete an API key for an agent, send a DELETE request to
+        `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+
+        Raises:
+          ValueError: If `agent_uuid` or `api_key_uuid` is an empty string.
+        """
+        if not agent_uuid:
+            raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+        if not api_key_uuid:
+            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+        # Fall back to the production DigitalOcean host unless the client was
+        # constructed with an explicit base-URL override.
+        return self._delete(
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=APIKeyDeleteResponse,
+        )
+
+    def regenerate(
+        self,
+        api_key_uuid: str,
+        *,
+        agent_uuid: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> APIKeyRegenerateResponse:
+        """
+        To regenerate an agent API key, send a PUT request to
+        `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+
+        Raises:
+          ValueError: If `agent_uuid` or `api_key_uuid` is an empty string.
+        """
+        if not agent_uuid:
+            raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+        if not api_key_uuid:
+            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+        # Fall back to the production DigitalOcean host unless the client was
+        # constructed with an explicit base-URL override.
+        return self._put(
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=APIKeyRegenerateResponse,
+        )
+
+
+class AsyncAPIKeysResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAPIKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncAPIKeysResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ path_agent_uuid: str,
+ *,
+ body_agent_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyCreateResponse:
+ """
+ To create an agent API key, send a POST request to
+ `/v2/gen-ai/agents/{agent_uuid}/api_keys`.
+
+ Args:
+ body_agent_uuid: Agent id
+
+        name: A human-friendly name to identify the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+ return await self._post(
+ f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys",
+ body=await async_maybe_transform(
+ {
+ "body_agent_uuid": body_agent_uuid,
+ "name": name,
+ },
+ api_key_create_params.APIKeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyCreateResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ path_agent_uuid: str,
+ body_agent_uuid: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyUpdateResponse:
+ """
+ To update an agent API key, send a PUT request to
+ `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`.
+
+ Args:
+ body_agent_uuid: Agent id
+
+ body_api_key_uuid: API key ID
+
+ name: Name
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "body_agent_uuid": body_agent_uuid,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ api_key_update_params.APIKeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyUpdateResponse,
+ )
+
+ async def list(
+ self,
+ agent_uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyListResponse:
+ """
+ To list all agent API keys, send a GET request to
+ `/v2/gen-ai/agents/{agent_uuid}/api_keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/agents/{agent_uuid}/api_keys"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ api_key_list_params.APIKeyListParams,
+ ),
+ ),
+ cast_to=APIKeyListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyDeleteResponse:
+ """
+ To delete an API key for an agent, send a DELETE request to
+ `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyDeleteResponse,
+ )
+
+ async def regenerate(
+ self,
+ api_key_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyRegenerateResponse:
+ """
+ To regenerate an agent API key, send a PUT request to
+ `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyRegenerateResponse,
+ )
+
+
+class APIKeysResourceWithRawResponse:
+ def __init__(self, api_keys: APIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = to_raw_response_wrapper(
+ api_keys.create,
+ )
+ self.update = to_raw_response_wrapper(
+ api_keys.update,
+ )
+ self.list = to_raw_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ api_keys.delete,
+ )
+ self.regenerate = to_raw_response_wrapper(
+ api_keys.regenerate,
+ )
+
+
+class AsyncAPIKeysResourceWithRawResponse:
+ def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = async_to_raw_response_wrapper(
+ api_keys.create,
+ )
+ self.update = async_to_raw_response_wrapper(
+ api_keys.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ api_keys.delete,
+ )
+ self.regenerate = async_to_raw_response_wrapper(
+ api_keys.regenerate,
+ )
+
+
+class APIKeysResourceWithStreamingResponse:
+ def __init__(self, api_keys: APIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = to_streamed_response_wrapper(
+ api_keys.create,
+ )
+ self.update = to_streamed_response_wrapper(
+ api_keys.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ api_keys.delete,
+ )
+ self.regenerate = to_streamed_response_wrapper(
+ api_keys.regenerate,
+ )
+
+
+class AsyncAPIKeysResourceWithStreamingResponse:
+ def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = async_to_streamed_response_wrapper(
+ api_keys.create,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ api_keys.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ api_keys.delete,
+ )
+ self.regenerate = async_to_streamed_response_wrapper(
+ api_keys.regenerate,
+ )
diff --git a/src/digitalocean_genai_sdk/resources/chat/__init__.py b/src/gradient/resources/agents/chat/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/resources/chat/__init__.py
rename to src/gradient/resources/agents/chat/__init__.py
diff --git a/src/gradient/resources/agents/chat/chat.py b/src/gradient/resources/agents/chat/chat.py
new file mode 100644
index 00000000..a087c84d
--- /dev/null
+++ b/src/gradient/resources/agents/chat/chat.py
@@ -0,0 +1,120 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ...._compat import cached_property
+from .completions import (
+ CompletionsResource,
+ AsyncCompletionsResource,
+ CompletionsResourceWithRawResponse,
+ AsyncCompletionsResourceWithRawResponse,
+ CompletionsResourceWithStreamingResponse,
+ AsyncCompletionsResourceWithStreamingResponse,
+)
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["ChatResource", "AsyncChatResource"]
+
+
+class ChatResource(SyncAPIResource):
+ @cached_property
+ def completions(self) -> CompletionsResource:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return CompletionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ChatResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ChatResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ChatResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ChatResourceWithStreamingResponse(self)
+
+
+class AsyncChatResource(AsyncAPIResource):
+ @cached_property
+ def completions(self) -> AsyncCompletionsResource:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return AsyncCompletionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncChatResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncChatResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncChatResourceWithStreamingResponse(self)
+
+
+class ChatResourceWithRawResponse:
+ def __init__(self, chat: ChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> CompletionsResourceWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return CompletionsResourceWithRawResponse(self._chat.completions)
+
+
+class AsyncChatResourceWithRawResponse:
+ def __init__(self, chat: AsyncChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> AsyncCompletionsResourceWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return AsyncCompletionsResourceWithRawResponse(self._chat.completions)
+
+
+class ChatResourceWithStreamingResponse:
+ def __init__(self, chat: ChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> CompletionsResourceWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return CompletionsResourceWithStreamingResponse(self._chat.completions)
+
+
+class AsyncChatResourceWithStreamingResponse:
+ def __init__(self, chat: AsyncChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> AsyncCompletionsResourceWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions)
diff --git a/src/gradient/resources/agents/chat/completions.py b/src/gradient/resources/agents/chat/completions.py
new file mode 100644
index 00000000..619a2712
--- /dev/null
+++ b/src/gradient/resources/agents/chat/completions.py
@@ -0,0 +1,1073 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._streaming import Stream, AsyncStream
+from ...._base_client import make_request_options
+from ....types.agents.chat import completion_create_params
+from ....types.shared.chat_completion_chunk import ChatCompletionChunk
+from ....types.agents.chat.completion_create_response import CompletionCreateResponse
+
+__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
+
+
+class CompletionsResource(SyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> CompletionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return CompletionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return CompletionsResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompletionCreateResponse:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ stream: Literal[True],
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Stream[ChatCompletionChunk]:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+    # Overload: `stream` is a plain runtime bool, so the static return type is the
+    # union of the parsed response and the SSE chunk stream.
+    @overload
+    def create(
+        self,
+        *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
+        stream: bool,
+        frequency_penalty: Optional[float] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Dict[str, str]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: completion_create_params.ToolChoice | Omit = omit,
+        tools: Iterable[completion_create_params.Tool] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
+        """
+        Creates a model response for the given chat conversation.
+
+        Args:
+          messages: A list of messages comprising the conversation so far.
+
+          model: Model ID used to generate the response.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using server-sent events.
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help DigitalOcean to
+              monitor and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    # Either `messages` + `model` alone (non-streaming overload) or together with
+    # an explicit `stream` argument must be supplied.
+    @required_args(
+        ["messages", "model"],
+        ["messages", "model", "stream"],
+    )
+    def create(
+        self,
+        *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
+        frequency_penalty: Optional[float] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Dict[str, str]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+        stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: completion_create_params.ToolChoice | Omit = omit,
+        tools: Iterable[completion_create_params.Tool] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
+        """Send the chat-completion request to the agent endpoint (see overloads for full docs)."""
+        # This method requires an agent_access_key to be set via client argument or environment variable
+        if not self._client.agent_access_key:
+            raise TypeError(
+                "Could not resolve authentication method. Expected agent_access_key to be set for chat completions."
+            )
+        # Agent requests authenticate with the agent access key; caller-supplied
+        # extra_headers are spread last so an explicit Authorization header wins.
+        headers = extra_headers or {}
+        headers = {"Authorization": f"Bearer {self._client.agent_access_key}", **headers}
+
+        # When the client's base URL was explicitly overridden, use a relative path
+        # against it; otherwise target the configured agent endpoint directly.
+        return self._post(
+            "/chat/completions?agent=true"
+            if self._client._base_url_overridden
+            else f"{self._client.agent_endpoint}/api/v1/chat/completions?agent=true",
+            body=maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "frequency_penalty": frequency_penalty,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "n": n,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "stop": stop,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                # Pick the TypedDict matching the chosen mode so transforms/validation agree.
+                (
+                    completion_create_params.CompletionCreateParamsStreaming
+                    if stream
+                    else completion_create_params.CompletionCreateParamsNonStreaming
+                ),
+            ),
+            options=make_request_options(
+                extra_headers=headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=CompletionCreateResponse,
+            # `stream` may be omit/None here; normalize to a concrete bool for _post.
+            stream=stream or False,
+            stream_cls=Stream[ChatCompletionChunk],
+        )
+
+
+class AsyncCompletionsResource(AsyncAPIResource):
+    """
+    Given a list of messages comprising a conversation, the model will return a response.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncCompletionsResourceWithStreamingResponse(self)
+
+    # Overload: non-streaming request (`stream` omitted or False) -> parsed response.
+    @overload
+    async def create(
+        self,
+        *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
+        frequency_penalty: Optional[float] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Dict[str, str]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        stream: Optional[Literal[False]] | Omit = omit,
+        stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: completion_create_params.ToolChoice | Omit = omit,
+        tools: Iterable[completion_create_params.Tool] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> CompletionCreateResponse:
+        """
+        Creates a model response for the given chat conversation.
+
+        Args:
+          messages: A list of messages comprising the conversation so far.
+
+          model: Model ID used to generate the response.
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using server-sent events.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help DigitalOcean to
+              monitor and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    # Overload: `stream=True` literal -> async stream of SSE chunks.
+    @overload
+    async def create(
+        self,
+        *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
+        stream: Literal[True],
+        frequency_penalty: Optional[float] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Dict[str, str]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: completion_create_params.ToolChoice | Omit = omit,
+        tools: Iterable[completion_create_params.Tool] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AsyncStream[ChatCompletionChunk]:
+        """
+        Creates a model response for the given chat conversation.
+
+        Args:
+          messages: A list of messages comprising the conversation so far.
+
+          model: Model ID used to generate the response.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using server-sent events.
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help DigitalOcean to
+              monitor and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    # Overload: `stream` is a plain runtime bool -> union of response and stream.
+    @overload
+    async def create(
+        self,
+        *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
+        stream: bool,
+        frequency_penalty: Optional[float] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Dict[str, str]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: completion_create_params.ToolChoice | Omit = omit,
+        tools: Iterable[completion_create_params.Tool] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
+        """
+        Creates a model response for the given chat conversation.
+
+        Args:
+          messages: A list of messages comprising the conversation so far.
+
+          model: Model ID used to generate the response.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using server-sent events.
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+              run. The run will make a best effort to use only the number of completion tokens
+              specified, across multiple turns of the run.
+
+          max_tokens: The maximum number of tokens that can be generated in the completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. Currently, only functions are supported as a
+              tool.
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help DigitalOcean to
+              monitor and detect abuse.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    # Either `messages` + `model` alone or together with an explicit `stream`.
+    @required_args(["messages", "model"], ["messages", "model", "stream"])
+    async def create(
+        self,
+        *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
+        frequency_penalty: Optional[float] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Dict[str, str]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+        stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: completion_create_params.ToolChoice | Omit = omit,
+        tools: Iterable[completion_create_params.Tool] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
+        """Send the chat-completion request to the agent endpoint (see overloads for full docs)."""
+        # This method requires an agent_access_key to be set via client argument or environment variable
+        if not self._client.agent_access_key:
+            raise TypeError(
+                "Could not resolve authentication method. Expected agent_access_key to be set for chat completions."
+            )
+        # Agent requests authenticate with the agent access key; caller-supplied
+        # extra_headers are spread last so an explicit Authorization header wins.
+        headers = extra_headers or {}
+        headers = {"Authorization": f"Bearer {self._client.agent_access_key}", **headers}
+
+        # When the client's base URL was explicitly overridden, use a relative path
+        # against it; otherwise target the configured agent endpoint directly.
+        return await self._post(
+            "/chat/completions?agent=true"
+            if self._client._base_url_overridden
+            else f"{self._client.agent_endpoint}/api/v1/chat/completions?agent=true",
+            body=await async_maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "frequency_penalty": frequency_penalty,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "n": n,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "stop": stop,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                # Pick the TypedDict matching the chosen mode so transforms/validation agree.
+                (
+                    completion_create_params.CompletionCreateParamsStreaming
+                    if stream
+                    else completion_create_params.CompletionCreateParamsNonStreaming
+                ),
+            ),
+            options=make_request_options(
+                extra_headers=headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=CompletionCreateResponse,
+            # `stream` may be omit/None here; normalize to a concrete bool for _post.
+            stream=stream or False,
+            stream_cls=AsyncStream[ChatCompletionChunk],
+        )
+
+
+class CompletionsResourceWithRawResponse:
+ def __init__(self, completions: CompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = to_raw_response_wrapper(
+ completions.create,
+ )
+
+
+class AsyncCompletionsResourceWithRawResponse:
+ def __init__(self, completions: AsyncCompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = async_to_raw_response_wrapper(
+ completions.create,
+ )
+
+
+class CompletionsResourceWithStreamingResponse:
+ def __init__(self, completions: CompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = to_streamed_response_wrapper(
+ completions.create,
+ )
+
+
+class AsyncCompletionsResourceWithStreamingResponse:
+ def __init__(self, completions: AsyncCompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = async_to_streamed_response_wrapper(
+ completions.create,
+ )
diff --git a/src/gradient/resources/agents/evaluation_datasets.py b/src/gradient/resources/agents/evaluation_datasets.py
new file mode 100644
index 00000000..47ba1ba1
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_datasets.py
@@ -0,0 +1,311 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents import (
+ evaluation_dataset_create_params,
+ evaluation_dataset_create_file_upload_presigned_urls_params,
+)
+from ...types.agents.evaluation_dataset_create_response import EvaluationDatasetCreateResponse
+from ...types.knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam
+from ...types.agents.evaluation_dataset_create_file_upload_presigned_urls_response import (
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+)
+
+__all__ = ["EvaluationDatasetsResource", "AsyncEvaluationDatasetsResource"]
+
+
+class EvaluationDatasetsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> EvaluationDatasetsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return EvaluationDatasetsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> EvaluationDatasetsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return EvaluationDatasetsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ dataset_type: Literal[
+ "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK"
+ ]
+ | Omit = omit,
+ file_upload_dataset: APIFileUploadDataSourceParam | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationDatasetCreateResponse:
+ """
+ To create an evaluation dataset, send a POST request to
+ `/v2/gen-ai/evaluation_datasets`.
+
+ Args:
+ file_upload_dataset: File to upload as data source for knowledge base.
+
+ name: The name of the agent evaluation dataset.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/evaluation_datasets"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets",
+ body=maybe_transform(
+ {
+ "dataset_type": dataset_type,
+ "file_upload_dataset": file_upload_dataset,
+ "name": name,
+ },
+ evaluation_dataset_create_params.EvaluationDatasetCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationDatasetCreateResponse,
+ )
+
+ def create_file_upload_presigned_urls(
+ self,
+ *,
+ files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse:
+ """
+ To create presigned URLs for evaluation dataset file upload, send a POST request
+ to `/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls`.
+
+ Args:
+ files: A list of files to generate presigned URLs for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls",
+ body=maybe_transform(
+ {"files": files},
+ evaluation_dataset_create_file_upload_presigned_urls_params.EvaluationDatasetCreateFileUploadPresignedURLsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+ )
+
+
+class AsyncEvaluationDatasetsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncEvaluationDatasetsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncEvaluationDatasetsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncEvaluationDatasetsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ dataset_type: Literal[
+ "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK"
+ ]
+ | Omit = omit,
+ file_upload_dataset: APIFileUploadDataSourceParam | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationDatasetCreateResponse:
+ """
+ To create an evaluation dataset, send a POST request to
+ `/v2/gen-ai/evaluation_datasets`.
+
+ Args:
+ file_upload_dataset: File to upload as data source for knowledge base.
+
+ name: The name of the agent evaluation dataset.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/evaluation_datasets"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets",
+ body=await async_maybe_transform(
+ {
+ "dataset_type": dataset_type,
+ "file_upload_dataset": file_upload_dataset,
+ "name": name,
+ },
+ evaluation_dataset_create_params.EvaluationDatasetCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationDatasetCreateResponse,
+ )
+
+ async def create_file_upload_presigned_urls(
+ self,
+ *,
+ files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse:
+ """
+ To create presigned URLs for evaluation dataset file upload, send a POST request
+ to `/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls`.
+
+ Args:
+ files: A list of files to generate presigned URLs for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls",
+ body=await async_maybe_transform(
+ {"files": files},
+ evaluation_dataset_create_file_upload_presigned_urls_params.EvaluationDatasetCreateFileUploadPresignedURLsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+ )
+
+
+class EvaluationDatasetsResourceWithRawResponse:
+ def __init__(self, evaluation_datasets: EvaluationDatasetsResource) -> None:
+ self._evaluation_datasets = evaluation_datasets
+
+ self.create = to_raw_response_wrapper(
+ evaluation_datasets.create,
+ )
+ self.create_file_upload_presigned_urls = to_raw_response_wrapper(
+ evaluation_datasets.create_file_upload_presigned_urls,
+ )
+
+
+class AsyncEvaluationDatasetsResourceWithRawResponse:
+ def __init__(self, evaluation_datasets: AsyncEvaluationDatasetsResource) -> None:
+ self._evaluation_datasets = evaluation_datasets
+
+ self.create = async_to_raw_response_wrapper(
+ evaluation_datasets.create,
+ )
+ self.create_file_upload_presigned_urls = async_to_raw_response_wrapper(
+ evaluation_datasets.create_file_upload_presigned_urls,
+ )
+
+
+class EvaluationDatasetsResourceWithStreamingResponse:
+ def __init__(self, evaluation_datasets: EvaluationDatasetsResource) -> None:
+ self._evaluation_datasets = evaluation_datasets
+
+ self.create = to_streamed_response_wrapper(
+ evaluation_datasets.create,
+ )
+ self.create_file_upload_presigned_urls = to_streamed_response_wrapper(
+ evaluation_datasets.create_file_upload_presigned_urls,
+ )
+
+
+class AsyncEvaluationDatasetsResourceWithStreamingResponse:
+ def __init__(self, evaluation_datasets: AsyncEvaluationDatasetsResource) -> None:
+ self._evaluation_datasets = evaluation_datasets
+
+ self.create = async_to_streamed_response_wrapper(
+ evaluation_datasets.create,
+ )
+ self.create_file_upload_presigned_urls = async_to_streamed_response_wrapper(
+ evaluation_datasets.create_file_upload_presigned_urls,
+ )
diff --git a/src/gradient/resources/agents/evaluation_metrics/__init__.py b/src/gradient/resources/agents/evaluation_metrics/__init__.py
new file mode 100644
index 00000000..fcb54c78
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/__init__.py
@@ -0,0 +1,89 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .oauth2 import (
+ Oauth2Resource,
+ AsyncOauth2Resource,
+ Oauth2ResourceWithRawResponse,
+ AsyncOauth2ResourceWithRawResponse,
+ Oauth2ResourceWithStreamingResponse,
+ AsyncOauth2ResourceWithStreamingResponse,
+)
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+from .workspaces import (
+ WorkspacesResource,
+ AsyncWorkspacesResource,
+ WorkspacesResourceWithRawResponse,
+ AsyncWorkspacesResourceWithRawResponse,
+ WorkspacesResourceWithStreamingResponse,
+ AsyncWorkspacesResourceWithStreamingResponse,
+)
+from .evaluation_metrics import (
+ EvaluationMetricsResource,
+ AsyncEvaluationMetricsResource,
+ EvaluationMetricsResourceWithRawResponse,
+ AsyncEvaluationMetricsResourceWithRawResponse,
+ EvaluationMetricsResourceWithStreamingResponse,
+ AsyncEvaluationMetricsResourceWithStreamingResponse,
+)
+from .scheduled_indexing import (
+ ScheduledIndexingResource,
+ AsyncScheduledIndexingResource,
+ ScheduledIndexingResourceWithRawResponse,
+ AsyncScheduledIndexingResourceWithRawResponse,
+ ScheduledIndexingResourceWithStreamingResponse,
+ AsyncScheduledIndexingResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "WorkspacesResource",
+ "AsyncWorkspacesResource",
+ "WorkspacesResourceWithRawResponse",
+ "AsyncWorkspacesResourceWithRawResponse",
+ "WorkspacesResourceWithStreamingResponse",
+ "AsyncWorkspacesResourceWithStreamingResponse",
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
+ "Oauth2Resource",
+ "AsyncOauth2Resource",
+ "Oauth2ResourceWithRawResponse",
+ "AsyncOauth2ResourceWithRawResponse",
+ "Oauth2ResourceWithStreamingResponse",
+ "AsyncOauth2ResourceWithStreamingResponse",
+ "ScheduledIndexingResource",
+ "AsyncScheduledIndexingResource",
+ "ScheduledIndexingResourceWithRawResponse",
+ "AsyncScheduledIndexingResourceWithRawResponse",
+ "ScheduledIndexingResourceWithStreamingResponse",
+ "AsyncScheduledIndexingResourceWithStreamingResponse",
+ "EvaluationMetricsResource",
+ "AsyncEvaluationMetricsResource",
+ "EvaluationMetricsResourceWithRawResponse",
+ "AsyncEvaluationMetricsResourceWithRawResponse",
+ "EvaluationMetricsResourceWithStreamingResponse",
+ "AsyncEvaluationMetricsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/agents/evaluation_metrics/anthropic/__init__.py b/src/gradient/resources/agents/evaluation_metrics/anthropic/__init__.py
new file mode 100644
index 00000000..057a3a2f
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/anthropic/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "KeysResource",
+ "AsyncKeysResource",
+ "KeysResourceWithRawResponse",
+ "AsyncKeysResourceWithRawResponse",
+ "KeysResourceWithStreamingResponse",
+ "AsyncKeysResourceWithStreamingResponse",
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/agents/evaluation_metrics/anthropic/anthropic.py b/src/gradient/resources/agents/evaluation_metrics/anthropic/anthropic.py
new file mode 100644
index 00000000..20e6e5b1
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/anthropic/anthropic.py
@@ -0,0 +1,120 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["AnthropicResource", "AsyncAnthropicResource"]
+
+
+class AnthropicResource(SyncAPIResource):
+ @cached_property
+ def keys(self) -> KeysResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AnthropicResourceWithStreamingResponse(self)
+
+
+class AsyncAnthropicResource(AsyncAPIResource):
+ @cached_property
+ def keys(self) -> AsyncKeysResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncAnthropicResourceWithStreamingResponse(self)
+
+
+class AnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> KeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KeysResourceWithRawResponse(self._anthropic.keys)
+
+
+class AsyncAnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKeysResourceWithRawResponse(self._anthropic.keys)
+
+
+class AnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> KeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KeysResourceWithStreamingResponse(self._anthropic.keys)
+
+
+class AsyncAnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys)
diff --git a/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py b/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py
new file mode 100644
index 00000000..195d8f88
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py
@@ -0,0 +1,719 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics.anthropic import (
+ key_list_params,
+ key_create_params,
+ key_update_params,
+ key_list_agents_params,
+)
+from .....types.agents.evaluation_metrics.anthropic.key_list_response import KeyListResponse
+from .....types.agents.evaluation_metrics.anthropic.key_create_response import KeyCreateResponse
+from .....types.agents.evaluation_metrics.anthropic.key_delete_response import KeyDeleteResponse
+from .....types.agents.evaluation_metrics.anthropic.key_update_response import KeyUpdateResponse
+from .....types.agents.evaluation_metrics.anthropic.key_retrieve_response import KeyRetrieveResponse
+from .....types.agents.evaluation_metrics.anthropic.key_list_agents_response import KeyListAgentsResponse
+
+__all__ = ["KeysResource", "AsyncKeysResource"]
+
+
+class KeysResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> KeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return KeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return KeysResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyCreateResponse:
+ """
+ To create an Anthropic API key, send a POST request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ api_key: Anthropic API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an Anthropic API key, send a GET request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyUpdateResponse:
+ """
+ To update an Anthropic API key, send a PUT request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: Anthropic API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyListResponse:
+ """
+ To list all Anthropic API keys, send a GET request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an Anthropic API key, send a DELETE request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+    def list_agents(
+        self,
+        uuid: str,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KeyListAgentsResponse:
+        """
+        List Agents by Anthropic Key.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Fail fast on an empty path parameter; an empty `uuid` would yield a malformed URL.
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        # The request targets the public DigitalOcean API host unless the client's base_url was overridden.
+        return self._get(
+            f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    key_list_agents_params.KeyListAgentsParams,
+                ),
+            ),
+            cast_to=KeyListAgentsResponse,
+        )
+
+
+class AsyncKeysResource(AsyncAPIResource):
+    """
+    The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncKeysResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncKeysResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        api_key: str | Omit = omit,
+        name: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KeyCreateResponse:
+        """
+        To create an Anthropic API key, send a POST request to
+        `/v2/gen-ai/anthropic/keys`.
+
+        Args:
+          api_key: Anthropic API key
+
+          name: Name of the key
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Requests go to the public DigitalOcean API host unless the client's base_url was overridden.
+        return await self._post(
+            "/v2/gen-ai/anthropic/keys"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+            body=await async_maybe_transform(
+                {
+                    "api_key": api_key,
+                    "name": name,
+                },
+                key_create_params.KeyCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KeyCreateResponse,
+        )
+
+    async def retrieve(
+        self,
+        api_key_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KeyRetrieveResponse:
+        """
+        To retrieve details of an Anthropic API key, send a GET request to
+        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Fail fast on an empty path parameter; an empty `api_key_uuid` would yield a malformed URL.
+        if not api_key_uuid:
+            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+        return await self._get(
+            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KeyRetrieveResponse,
+        )
+
+    async def update(
+        self,
+        path_api_key_uuid: str,
+        *,
+        api_key: str | Omit = omit,
+        body_api_key_uuid: str | Omit = omit,
+        name: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KeyUpdateResponse:
+        """
+        To update an Anthropic API key, send a PUT request to
+        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+        Args:
+          api_key: Anthropic API key
+
+          body_api_key_uuid: API key ID
+
+          name: Name of the key
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # `path_api_key_uuid` addresses the key in the URL; `body_api_key_uuid` is a separate,
+        # optional value carried in the request payload via KeyUpdateParams.
+        if not path_api_key_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+        return await self._put(
+            f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+            body=await async_maybe_transform(
+                {
+                    "api_key": api_key,
+                    "body_api_key_uuid": body_api_key_uuid,
+                    "name": name,
+                },
+                key_update_params.KeyUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KeyUpdateResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KeyListResponse:
+        """
+        To list all Anthropic API keys, send a GET request to
+        `/v2/gen-ai/anthropic/keys`.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/gen-ai/anthropic/keys"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    key_list_params.KeyListParams,
+                ),
+            ),
+            cast_to=KeyListResponse,
+        )
+
+    async def delete(
+        self,
+        api_key_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KeyDeleteResponse:
+        """
+        To delete an Anthropic API key, send a DELETE request to
+        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Fail fast on an empty path parameter; an empty `api_key_uuid` would yield a malformed URL.
+        if not api_key_uuid:
+            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+        return await self._delete(
+            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KeyDeleteResponse,
+        )
+
+    async def list_agents(
+        self,
+        uuid: str,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KeyListAgentsResponse:
+        """
+        List Agents by Anthropic Key.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return await self._get(
+            f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    key_list_agents_params.KeyListAgentsParams,
+                ),
+            ),
+            cast_to=KeyListAgentsResponse,
+        )
+
+
+class KeysResourceWithRawResponse:
+    """Proxy for `KeysResource` whose methods return the raw response object instead of parsed content."""
+
+    def __init__(self, keys: KeysResource) -> None:
+        self._keys = keys
+
+        # Each public method is wrapped so calls yield the raw HTTP response.
+        self.create = to_raw_response_wrapper(
+            keys.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            keys.retrieve,
+        )
+        self.update = to_raw_response_wrapper(
+            keys.update,
+        )
+        self.list = to_raw_response_wrapper(
+            keys.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            keys.delete,
+        )
+        self.list_agents = to_raw_response_wrapper(
+            keys.list_agents,
+        )
+
+
+class AsyncKeysResourceWithRawResponse:
+    """Proxy for `AsyncKeysResource` whose methods return the raw response object instead of parsed content."""
+
+    def __init__(self, keys: AsyncKeysResource) -> None:
+        self._keys = keys
+
+        # Each public method is wrapped so calls yield the raw HTTP response.
+        self.create = async_to_raw_response_wrapper(
+            keys.create,
+        )
+        self.retrieve = async_to_raw_response_wrapper(
+            keys.retrieve,
+        )
+        self.update = async_to_raw_response_wrapper(
+            keys.update,
+        )
+        self.list = async_to_raw_response_wrapper(
+            keys.list,
+        )
+        self.delete = async_to_raw_response_wrapper(
+            keys.delete,
+        )
+        self.list_agents = async_to_raw_response_wrapper(
+            keys.list_agents,
+        )
+
+
+class KeysResourceWithStreamingResponse:
+    """Proxy for `KeysResource` whose methods return responses that don't eagerly read the body."""
+
+    def __init__(self, keys: KeysResource) -> None:
+        self._keys = keys
+
+        # Each public method is wrapped so the response body can be streamed.
+        self.create = to_streamed_response_wrapper(
+            keys.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            keys.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            keys.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            keys.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            keys.delete,
+        )
+        self.list_agents = to_streamed_response_wrapper(
+            keys.list_agents,
+        )
+
+
+class AsyncKeysResourceWithStreamingResponse:
+    """Proxy for `AsyncKeysResource` whose methods return responses that don't eagerly read the body."""
+
+    def __init__(self, keys: AsyncKeysResource) -> None:
+        self._keys = keys
+
+        # Each public method is wrapped so the response body can be streamed.
+        self.create = async_to_streamed_response_wrapper(
+            keys.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            keys.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            keys.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            keys.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            keys.delete,
+        )
+        self.list_agents = async_to_streamed_response_wrapper(
+            keys.list_agents,
+        )
diff --git a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
new file mode 100644
index 00000000..1b081c70
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
@@ -0,0 +1,478 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .oauth2.oauth2 import (
+ Oauth2Resource,
+ AsyncOauth2Resource,
+ Oauth2ResourceWithRawResponse,
+ AsyncOauth2ResourceWithRawResponse,
+ Oauth2ResourceWithStreamingResponse,
+ AsyncOauth2ResourceWithStreamingResponse,
+)
+from .openai.openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from ...._base_client import make_request_options
+from ....types.agents import evaluation_metric_list_regions_params
+from .scheduled_indexing import (
+ ScheduledIndexingResource,
+ AsyncScheduledIndexingResource,
+ ScheduledIndexingResourceWithRawResponse,
+ AsyncScheduledIndexingResourceWithRawResponse,
+ ScheduledIndexingResourceWithStreamingResponse,
+ AsyncScheduledIndexingResourceWithStreamingResponse,
+)
+from .anthropic.anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+from .workspaces.workspaces import (
+ WorkspacesResource,
+ AsyncWorkspacesResource,
+ WorkspacesResourceWithRawResponse,
+ AsyncWorkspacesResourceWithRawResponse,
+ WorkspacesResourceWithStreamingResponse,
+ AsyncWorkspacesResourceWithStreamingResponse,
+)
+from ....types.agents.evaluation_metric_list_response import EvaluationMetricListResponse
+from ....types.agents.evaluation_metric_list_regions_response import EvaluationMetricListRegionsResponse
+
+__all__ = ["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"]
+
+
+class EvaluationMetricsResource(SyncAPIResource):
+    """
+    The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+    """
+
+    @cached_property
+    def workspaces(self) -> WorkspacesResource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return WorkspacesResource(self._client)
+
+    @cached_property
+    def anthropic(self) -> AnthropicResource:
+        # Sub-resource for Anthropic API key management endpoints.
+        return AnthropicResource(self._client)
+
+    @cached_property
+    def openai(self) -> OpenAIResource:
+        # Sub-resource for OpenAI API key management endpoints.
+        return OpenAIResource(self._client)
+
+    @cached_property
+    def oauth2(self) -> Oauth2Resource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return Oauth2Resource(self._client)
+
+    @cached_property
+    def scheduled_indexing(self) -> ScheduledIndexingResource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return ScheduledIndexingResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return EvaluationMetricsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> EvaluationMetricsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return EvaluationMetricsResourceWithStreamingResponse(self)
+
+    def list(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> EvaluationMetricListResponse:
+        """
+        To list all evaluation metrics, send a GET request to
+        `/v2/gen-ai/evaluation_metrics`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/gen-ai/evaluation_metrics"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationMetricListResponse,
+        )
+
+    def list_regions(
+        self,
+        *,
+        serves_batch: bool | Omit = omit,
+        serves_inference: bool | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> EvaluationMetricListRegionsResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: Include datacenters that are capable of running batch jobs.
+
+          serves_inference: Include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Note: this endpoint is the shared `/v2/gen-ai/regions` listing, not an
+        # evaluation_metrics-specific path.
+        return self._get(
+            "/v2/gen-ai/regions"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams,
+                ),
+            ),
+            cast_to=EvaluationMetricListRegionsResponse,
+        )
+
+
+class AsyncEvaluationMetricsResource(AsyncAPIResource):
+    """
+    The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+    """
+
+    @cached_property
+    def workspaces(self) -> AsyncWorkspacesResource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncWorkspacesResource(self._client)
+
+    @cached_property
+    def anthropic(self) -> AsyncAnthropicResource:
+        # Sub-resource for Anthropic API key management endpoints.
+        return AsyncAnthropicResource(self._client)
+
+    @cached_property
+    def openai(self) -> AsyncOpenAIResource:
+        # Sub-resource for OpenAI API key management endpoints.
+        return AsyncOpenAIResource(self._client)
+
+    @cached_property
+    def oauth2(self) -> AsyncOauth2Resource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncOauth2Resource(self._client)
+
+    @cached_property
+    def scheduled_indexing(self) -> AsyncScheduledIndexingResource:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncScheduledIndexingResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncEvaluationMetricsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncEvaluationMetricsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncEvaluationMetricsResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> EvaluationMetricListResponse:
+        """
+        To list all evaluation metrics, send a GET request to
+        `/v2/gen-ai/evaluation_metrics`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/gen-ai/evaluation_metrics"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationMetricListResponse,
+        )
+
+    async def list_regions(
+        self,
+        *,
+        serves_batch: bool | Omit = omit,
+        serves_inference: bool | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> EvaluationMetricListRegionsResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: Include datacenters that are capable of running batch jobs.
+
+          serves_inference: Include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Note: this endpoint is the shared `/v2/gen-ai/regions` listing, not an
+        # evaluation_metrics-specific path.
+        return await self._get(
+            "/v2/gen-ai/regions"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams,
+                ),
+            ),
+            cast_to=EvaluationMetricListRegionsResponse,
+        )
+
+
+class EvaluationMetricsResourceWithRawResponse:
+    """Proxy for `EvaluationMetricsResource` whose methods return the raw response object instead of parsed content."""
+
+    def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
+        self._evaluation_metrics = evaluation_metrics
+
+        self.list = to_raw_response_wrapper(
+            evaluation_metrics.list,
+        )
+        self.list_regions = to_raw_response_wrapper(
+            evaluation_metrics.list_regions,
+        )
+
+    @cached_property
+    def workspaces(self) -> WorkspacesResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return WorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces)
+
+    @cached_property
+    def anthropic(self) -> AnthropicResourceWithRawResponse:
+        return AnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic)
+
+    @cached_property
+    def openai(self) -> OpenAIResourceWithRawResponse:
+        return OpenAIResourceWithRawResponse(self._evaluation_metrics.openai)
+
+    @cached_property
+    def oauth2(self) -> Oauth2ResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return Oauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2)
+
+    @cached_property
+    def scheduled_indexing(self) -> ScheduledIndexingResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return ScheduledIndexingResourceWithRawResponse(self._evaluation_metrics.scheduled_indexing)
+
+
+class AsyncEvaluationMetricsResourceWithRawResponse:
+    """Proxy for `AsyncEvaluationMetricsResource` whose methods return the raw response object instead of parsed content."""
+
+    def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
+        self._evaluation_metrics = evaluation_metrics
+
+        self.list = async_to_raw_response_wrapper(
+            evaluation_metrics.list,
+        )
+        self.list_regions = async_to_raw_response_wrapper(
+            evaluation_metrics.list_regions,
+        )
+
+    @cached_property
+    def workspaces(self) -> AsyncWorkspacesResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncWorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces)
+
+    @cached_property
+    def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
+        return AsyncAnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic)
+
+    @cached_property
+    def openai(self) -> AsyncOpenAIResourceWithRawResponse:
+        return AsyncOpenAIResourceWithRawResponse(self._evaluation_metrics.openai)
+
+    @cached_property
+    def oauth2(self) -> AsyncOauth2ResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncOauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2)
+
+    @cached_property
+    def scheduled_indexing(self) -> AsyncScheduledIndexingResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncScheduledIndexingResourceWithRawResponse(self._evaluation_metrics.scheduled_indexing)
+
+
+class EvaluationMetricsResourceWithStreamingResponse:
+    """Proxy for `EvaluationMetricsResource` whose methods return responses that don't eagerly read the body."""
+
+    def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
+        self._evaluation_metrics = evaluation_metrics
+
+        self.list = to_streamed_response_wrapper(
+            evaluation_metrics.list,
+        )
+        self.list_regions = to_streamed_response_wrapper(
+            evaluation_metrics.list_regions,
+        )
+
+    @cached_property
+    def workspaces(self) -> WorkspacesResourceWithStreamingResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return WorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces)
+
+    @cached_property
+    def anthropic(self) -> AnthropicResourceWithStreamingResponse:
+        return AnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic)
+
+    @cached_property
+    def openai(self) -> OpenAIResourceWithStreamingResponse:
+        return OpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai)
+
+    @cached_property
+    def oauth2(self) -> Oauth2ResourceWithStreamingResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return Oauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2)
+
+    @cached_property
+    def scheduled_indexing(self) -> ScheduledIndexingResourceWithStreamingResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return ScheduledIndexingResourceWithStreamingResponse(self._evaluation_metrics.scheduled_indexing)
+
+
+class AsyncEvaluationMetricsResourceWithStreamingResponse:
+    """Proxy for `AsyncEvaluationMetricsResource` whose methods return responses that don't eagerly read the body."""
+
+    def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
+        self._evaluation_metrics = evaluation_metrics
+
+        self.list = async_to_streamed_response_wrapper(
+            evaluation_metrics.list,
+        )
+        self.list_regions = async_to_streamed_response_wrapper(
+            evaluation_metrics.list_regions,
+        )
+
+    @cached_property
+    def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncWorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces)
+
+    @cached_property
+    def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
+        return AsyncAnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic)
+
+    @cached_property
+    def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
+        return AsyncOpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai)
+
+    @cached_property
+    def oauth2(self) -> AsyncOauth2ResourceWithStreamingResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncOauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2)
+
+    @cached_property
+    def scheduled_indexing(self) -> AsyncScheduledIndexingResourceWithStreamingResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return AsyncScheduledIndexingResourceWithStreamingResponse(self._evaluation_metrics.scheduled_indexing)
diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/__init__.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/__init__.py
new file mode 100644
index 00000000..c74ddfe8
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .oauth2 import (
+ Oauth2Resource,
+ AsyncOauth2Resource,
+ Oauth2ResourceWithRawResponse,
+ AsyncOauth2ResourceWithRawResponse,
+ Oauth2ResourceWithStreamingResponse,
+ AsyncOauth2ResourceWithStreamingResponse,
+)
+from .dropbox import (
+ DropboxResource,
+ AsyncDropboxResource,
+ DropboxResourceWithRawResponse,
+ AsyncDropboxResourceWithRawResponse,
+ DropboxResourceWithStreamingResponse,
+ AsyncDropboxResourceWithStreamingResponse,
+)
+
+# Public surface of the oauth2 subpackage: the Dropbox and Oauth2 resources plus
+# their raw-response and streaming-response wrapper variants.
+__all__ = [
+    "DropboxResource",
+    "AsyncDropboxResource",
+    "DropboxResourceWithRawResponse",
+    "AsyncDropboxResourceWithRawResponse",
+    "DropboxResourceWithStreamingResponse",
+    "AsyncDropboxResourceWithStreamingResponse",
+    "Oauth2Resource",
+    "AsyncOauth2Resource",
+    "Oauth2ResourceWithRawResponse",
+    "AsyncOauth2ResourceWithRawResponse",
+    "Oauth2ResourceWithStreamingResponse",
+    "AsyncOauth2ResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py
new file mode 100644
index 00000000..137aa164
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py
@@ -0,0 +1,201 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics.oauth2 import dropbox_create_tokens_params
+from .....types.agents.evaluation_metrics.oauth2.dropbox_create_tokens_response import DropboxCreateTokensResponse
+
+__all__ = ["DropboxResource", "AsyncDropboxResource"]
+
+
+class DropboxResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> DropboxResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return DropboxResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> DropboxResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return DropboxResourceWithStreamingResponse(self)
+
+ def create_tokens(
+ self,
+ *,
+ code: str | Omit = omit,
+ redirect_url: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DropboxCreateTokensResponse:
+ """
+ To obtain the refresh token, needed for creation of data sources, send a POST
+ request to `/v2/gen-ai/oauth2/dropbox/tokens`. Pass the code you obtained from
+ the OAuth flow in the field 'code'
+
+ Args:
+ code: The OAuth2 code from Dropbox
+
+ redirect_url: Redirect url
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/oauth2/dropbox/tokens"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/oauth2/dropbox/tokens",
+ body=maybe_transform(
+ {
+ "code": code,
+ "redirect_url": redirect_url,
+ },
+ dropbox_create_tokens_params.DropboxCreateTokensParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DropboxCreateTokensResponse,
+ )
+
+
+class AsyncDropboxResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncDropboxResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncDropboxResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncDropboxResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncDropboxResourceWithStreamingResponse(self)
+
+ async def create_tokens(
+ self,
+ *,
+ code: str | Omit = omit,
+ redirect_url: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DropboxCreateTokensResponse:
+ """
+ To obtain the refresh token, needed for creation of data sources, send a POST
+ request to `/v2/gen-ai/oauth2/dropbox/tokens`. Pass the code you obtained from
+ the OAuth flow in the field 'code'
+
+ Args:
+ code: The OAuth2 code from Dropbox
+
+ redirect_url: Redirect url
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/oauth2/dropbox/tokens"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/oauth2/dropbox/tokens",
+ body=await async_maybe_transform(
+ {
+ "code": code,
+ "redirect_url": redirect_url,
+ },
+ dropbox_create_tokens_params.DropboxCreateTokensParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DropboxCreateTokensResponse,
+ )
+
+
+class DropboxResourceWithRawResponse:
+ def __init__(self, dropbox: DropboxResource) -> None:
+ self._dropbox = dropbox
+
+ self.create_tokens = to_raw_response_wrapper(
+ dropbox.create_tokens,
+ )
+
+
+class AsyncDropboxResourceWithRawResponse:
+ def __init__(self, dropbox: AsyncDropboxResource) -> None:
+ self._dropbox = dropbox
+
+ self.create_tokens = async_to_raw_response_wrapper(
+ dropbox.create_tokens,
+ )
+
+
+class DropboxResourceWithStreamingResponse:
+ def __init__(self, dropbox: DropboxResource) -> None:
+ self._dropbox = dropbox
+
+ self.create_tokens = to_streamed_response_wrapper(
+ dropbox.create_tokens,
+ )
+
+
+class AsyncDropboxResourceWithStreamingResponse:
+ def __init__(self, dropbox: AsyncDropboxResource) -> None:
+ self._dropbox = dropbox
+
+ self.create_tokens = async_to_streamed_response_wrapper(
+ dropbox.create_tokens,
+ )
diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py
new file mode 100644
index 00000000..0cf47ca6
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py
@@ -0,0 +1,255 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .dropbox import (
+ DropboxResource,
+ AsyncDropboxResource,
+ DropboxResourceWithRawResponse,
+ AsyncDropboxResourceWithRawResponse,
+ DropboxResourceWithStreamingResponse,
+ AsyncDropboxResourceWithStreamingResponse,
+)
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics import oauth2_generate_url_params
+from .....types.agents.evaluation_metrics.oauth2_generate_url_response import Oauth2GenerateURLResponse
+
+__all__ = ["Oauth2Resource", "AsyncOauth2Resource"]
+
+
+class Oauth2Resource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def dropbox(self) -> DropboxResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return DropboxResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> Oauth2ResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return Oauth2ResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> Oauth2ResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return Oauth2ResourceWithStreamingResponse(self)
+
+ def generate_url(
+ self,
+ *,
+ redirect_url: str | Omit = omit,
+ type: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Oauth2GenerateURLResponse:
+ """
+ To generate an Oauth2-URL for use with your localhost, send a GET request to
+ `/v2/gen-ai/oauth2/url`. Pass 'http://localhost:3000' as redirect_url
+
+ Args:
+ redirect_url: The redirect url.
+
+ type: Type "google" / "dropbox".
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/oauth2/url"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/oauth2/url",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "redirect_url": redirect_url,
+ "type": type,
+ },
+ oauth2_generate_url_params.Oauth2GenerateURLParams,
+ ),
+ ),
+ cast_to=Oauth2GenerateURLResponse,
+ )
+
+
+class AsyncOauth2Resource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def dropbox(self) -> AsyncDropboxResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncDropboxResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncOauth2ResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOauth2ResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOauth2ResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncOauth2ResourceWithStreamingResponse(self)
+
+ async def generate_url(
+ self,
+ *,
+ redirect_url: str | Omit = omit,
+ type: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Oauth2GenerateURLResponse:
+ """
+ To generate an Oauth2-URL for use with your localhost, send a GET request to
+ `/v2/gen-ai/oauth2/url`. Pass 'http://localhost:3000' as redirect_url
+
+ Args:
+ redirect_url: The redirect url.
+
+ type: Type "google" / "dropbox".
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/oauth2/url"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/oauth2/url",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "redirect_url": redirect_url,
+ "type": type,
+ },
+ oauth2_generate_url_params.Oauth2GenerateURLParams,
+ ),
+ ),
+ cast_to=Oauth2GenerateURLResponse,
+ )
+
+
+class Oauth2ResourceWithRawResponse:
+ def __init__(self, oauth2: Oauth2Resource) -> None:
+ self._oauth2 = oauth2
+
+ self.generate_url = to_raw_response_wrapper(
+ oauth2.generate_url,
+ )
+
+ @cached_property
+ def dropbox(self) -> DropboxResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return DropboxResourceWithRawResponse(self._oauth2.dropbox)
+
+
+class AsyncOauth2ResourceWithRawResponse:
+ def __init__(self, oauth2: AsyncOauth2Resource) -> None:
+ self._oauth2 = oauth2
+
+ self.generate_url = async_to_raw_response_wrapper(
+ oauth2.generate_url,
+ )
+
+ @cached_property
+ def dropbox(self) -> AsyncDropboxResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncDropboxResourceWithRawResponse(self._oauth2.dropbox)
+
+
+class Oauth2ResourceWithStreamingResponse:
+ def __init__(self, oauth2: Oauth2Resource) -> None:
+ self._oauth2 = oauth2
+
+ self.generate_url = to_streamed_response_wrapper(
+ oauth2.generate_url,
+ )
+
+ @cached_property
+ def dropbox(self) -> DropboxResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return DropboxResourceWithStreamingResponse(self._oauth2.dropbox)
+
+
+class AsyncOauth2ResourceWithStreamingResponse:
+ def __init__(self, oauth2: AsyncOauth2Resource) -> None:
+ self._oauth2 = oauth2
+
+ self.generate_url = async_to_streamed_response_wrapper(
+ oauth2.generate_url,
+ )
+
+ @cached_property
+ def dropbox(self) -> AsyncDropboxResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncDropboxResourceWithStreamingResponse(self._oauth2.dropbox)
diff --git a/src/gradient/resources/agents/evaluation_metrics/openai/__init__.py b/src/gradient/resources/agents/evaluation_metrics/openai/__init__.py
new file mode 100644
index 00000000..66d8ca7a
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/openai/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "KeysResource",
+ "AsyncKeysResource",
+ "KeysResourceWithRawResponse",
+ "AsyncKeysResourceWithRawResponse",
+ "KeysResourceWithStreamingResponse",
+ "AsyncKeysResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/agents/evaluation_metrics/openai/keys.py b/src/gradient/resources/agents/evaluation_metrics/openai/keys.py
new file mode 100644
index 00000000..ffe4992d
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/openai/keys.py
@@ -0,0 +1,715 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics.openai import (
+ key_list_params,
+ key_create_params,
+ key_update_params,
+ key_list_agents_params,
+)
+from .....types.agents.evaluation_metrics.openai.key_list_response import KeyListResponse
+from .....types.agents.evaluation_metrics.openai.key_create_response import KeyCreateResponse
+from .....types.agents.evaluation_metrics.openai.key_delete_response import KeyDeleteResponse
+from .....types.agents.evaluation_metrics.openai.key_update_response import KeyUpdateResponse
+from .....types.agents.evaluation_metrics.openai.key_retrieve_response import KeyRetrieveResponse
+from .....types.agents.evaluation_metrics.openai.key_list_agents_response import KeyListAgentsResponse
+
+__all__ = ["KeysResource", "AsyncKeysResource"]
+
+
+class KeysResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> KeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return KeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return KeysResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyCreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+ def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyListAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_agents_params.KeyListAgentsParams,
+ ),
+ ),
+ cast_to=KeyListAgentsResponse,
+ )
+
+
+class AsyncKeysResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncKeysResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyCreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+ async def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyListAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_agents_params.KeyListAgentsParams,
+ ),
+ ),
+ cast_to=KeyListAgentsResponse,
+ )
+
+
+class KeysResourceWithRawResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = to_raw_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class AsyncKeysResourceWithRawResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = async_to_raw_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class KeysResourceWithStreamingResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = to_streamed_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class AsyncKeysResourceWithStreamingResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = async_to_streamed_response_wrapper(
+ keys.list_agents,
+ )
diff --git a/src/gradient/resources/agents/evaluation_metrics/openai/openai.py b/src/gradient/resources/agents/evaluation_metrics/openai/openai.py
new file mode 100644
index 00000000..4963c752
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/openai/openai.py
@@ -0,0 +1,120 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
+
+
+class OpenAIResource(SyncAPIResource):
+ @cached_property
+ def keys(self) -> KeysResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> OpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return OpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return OpenAIResourceWithStreamingResponse(self)
+
+
+class AsyncOpenAIResource(AsyncAPIResource):
+ @cached_property
+ def keys(self) -> AsyncKeysResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncOpenAIResourceWithStreamingResponse(self)
+
+
+class OpenAIResourceWithRawResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> KeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KeysResourceWithRawResponse(self._openai.keys)
+
+
+class AsyncOpenAIResourceWithRawResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKeysResourceWithRawResponse(self._openai.keys)
+
+
+class OpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> KeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return KeysResourceWithStreamingResponse(self._openai.keys)
+
+
+class AsyncOpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncKeysResourceWithStreamingResponse(self._openai.keys)
diff --git a/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py b/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py
new file mode 100644
index 00000000..b3da363c
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py
@@ -0,0 +1,385 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.agents.evaluation_metrics import scheduled_indexing_create_params
+from ....types.agents.evaluation_metrics.scheduled_indexing_create_response import ScheduledIndexingCreateResponse
+from ....types.agents.evaluation_metrics.scheduled_indexing_delete_response import ScheduledIndexingDeleteResponse
+from ....types.agents.evaluation_metrics.scheduled_indexing_retrieve_response import ScheduledIndexingRetrieveResponse
+
+__all__ = ["ScheduledIndexingResource", "AsyncScheduledIndexingResource"]
+
+
+class ScheduledIndexingResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> ScheduledIndexingResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ScheduledIndexingResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ScheduledIndexingResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ScheduledIndexingResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ days: Iterable[int] | Omit = omit,
+ knowledge_base_uuid: str | Omit = omit,
+ time: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingCreateResponse:
+ """
+ To create scheduled indexing for a knowledge base, send a POST request to
+ `/v2/gen-ai/scheduled-indexing`.
+
+ Args:
+ days: Days for execution (day is represented same as in a cron expression, e.g. Monday
+              begins with 1)
+
+ knowledge_base_uuid: Knowledge base uuid for which the schedule is created
+
+ time: Time of execution (HH:MM) UTC
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/scheduled-indexing"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/scheduled-indexing",
+ body=maybe_transform(
+ {
+ "days": days,
+ "knowledge_base_uuid": knowledge_base_uuid,
+ "time": time,
+ },
+ scheduled_indexing_create_params.ScheduledIndexingCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingRetrieveResponse:
+ """
+        Get Scheduled Indexing for knowledge base using knowledge base uuid, send a GET
+ request to `/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingRetrieveResponse,
+ )
+
+ def delete(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingDeleteResponse:
+ """
+ Delete Scheduled Indexing for knowledge base, send a DELETE request to
+ `/v2/gen-ai/scheduled-indexing/{uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/scheduled-indexing/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/{uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingDeleteResponse,
+ )
+
+
+class AsyncScheduledIndexingResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncScheduledIndexingResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncScheduledIndexingResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncScheduledIndexingResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncScheduledIndexingResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ days: Iterable[int] | Omit = omit,
+ knowledge_base_uuid: str | Omit = omit,
+ time: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingCreateResponse:
+ """
+ To create scheduled indexing for a knowledge base, send a POST request to
+ `/v2/gen-ai/scheduled-indexing`.
+
+ Args:
+ days: Days for execution (day is represented same as in a cron expression, e.g. Monday
+              begins with 1)
+
+ knowledge_base_uuid: Knowledge base uuid for which the schedule is created
+
+ time: Time of execution (HH:MM) UTC
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/scheduled-indexing"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/scheduled-indexing",
+ body=await async_maybe_transform(
+ {
+ "days": days,
+ "knowledge_base_uuid": knowledge_base_uuid,
+ "time": time,
+ },
+ scheduled_indexing_create_params.ScheduledIndexingCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingRetrieveResponse:
+ """
+        Get Scheduled Indexing for knowledge base using knowledge base uuid, send a GET
+ request to `/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingRetrieveResponse,
+ )
+
+ async def delete(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingDeleteResponse:
+ """
+ Delete Scheduled Indexing for knowledge base, send a DELETE request to
+ `/v2/gen-ai/scheduled-indexing/{uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/scheduled-indexing/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/{uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingDeleteResponse,
+ )
+
+
+class ScheduledIndexingResourceWithRawResponse:
+ def __init__(self, scheduled_indexing: ScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = to_raw_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = to_raw_response_wrapper(
+ scheduled_indexing.delete,
+ )
+
+
+class AsyncScheduledIndexingResourceWithRawResponse:
+ def __init__(self, scheduled_indexing: AsyncScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = async_to_raw_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ scheduled_indexing.delete,
+ )
+
+
+class ScheduledIndexingResourceWithStreamingResponse:
+ def __init__(self, scheduled_indexing: ScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = to_streamed_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = to_streamed_response_wrapper(
+ scheduled_indexing.delete,
+ )
+
+
+class AsyncScheduledIndexingResourceWithStreamingResponse:
+ def __init__(self, scheduled_indexing: AsyncScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = async_to_streamed_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ scheduled_indexing.delete,
+ )
diff --git a/src/gradient/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/__init__.py
new file mode 100644
index 00000000..79d75f90
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .agents import (
+ AgentsResource,
+ AsyncAgentsResource,
+ AgentsResourceWithRawResponse,
+ AsyncAgentsResourceWithRawResponse,
+ AgentsResourceWithStreamingResponse,
+ AsyncAgentsResourceWithStreamingResponse,
+)
+from .workspaces import (
+ WorkspacesResource,
+ AsyncWorkspacesResource,
+ WorkspacesResourceWithRawResponse,
+ AsyncWorkspacesResourceWithRawResponse,
+ WorkspacesResourceWithStreamingResponse,
+ AsyncWorkspacesResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "AgentsResource",
+ "AsyncAgentsResource",
+ "AgentsResourceWithRawResponse",
+ "AsyncAgentsResourceWithRawResponse",
+ "AgentsResourceWithStreamingResponse",
+ "AsyncAgentsResourceWithStreamingResponse",
+ "WorkspacesResource",
+ "AsyncWorkspacesResource",
+ "WorkspacesResourceWithRawResponse",
+ "AsyncWorkspacesResourceWithRawResponse",
+ "WorkspacesResourceWithStreamingResponse",
+ "AsyncWorkspacesResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py
new file mode 100644
index 00000000..41f3f993
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py
@@ -0,0 +1,334 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ....._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics.workspaces import agent_list_params, agent_move_params
+from .....types.agents.evaluation_metrics.workspaces.agent_list_response import AgentListResponse
+from .....types.agents.evaluation_metrics.workspaces.agent_move_response import AgentMoveResponse
+
+__all__ = ["AgentsResource", "AsyncAgentsResource"]
+
+
+class AgentsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AgentsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AgentsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AgentsResourceWithStreamingResponse(self)
+
+ def list(
+ self,
+ workspace_uuid: str,
+ *,
+ only_deployed: bool | Omit = omit,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AgentListResponse:
+ """
+ To list all agents by a Workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ only_deployed: Only list agents that are deployed.
+
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "only_deployed": only_deployed,
+ "page": page,
+ "per_page": per_page,
+ },
+ agent_list_params.AgentListParams,
+ ),
+ ),
+ cast_to=AgentListResponse,
+ )
+
+ def move(
+ self,
+ path_workspace_uuid: str,
+ *,
+ agent_uuids: SequenceNotStr[str] | Omit = omit,
+ body_workspace_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AgentMoveResponse:
+ """
+        To move all listed agents to a given workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ agent_uuids: Agent uuids
+
+ body_workspace_uuid: Workspace uuid to move agents to
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}/agents",
+ body=maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ agent_move_params.AgentMoveParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AgentMoveResponse,
+ )
+
+
+class AsyncAgentsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAgentsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncAgentsResourceWithStreamingResponse(self)
+
+ async def list(
+ self,
+ workspace_uuid: str,
+ *,
+ only_deployed: bool | Omit = omit,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AgentListResponse:
+ """
+ To list all agents by a Workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ only_deployed: Only list agents that are deployed.
+
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "only_deployed": only_deployed,
+ "page": page,
+ "per_page": per_page,
+ },
+ agent_list_params.AgentListParams,
+ ),
+ ),
+ cast_to=AgentListResponse,
+ )
+
+ async def move(
+ self,
+ path_workspace_uuid: str,
+ *,
+ agent_uuids: SequenceNotStr[str] | Omit = omit,
+ body_workspace_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AgentMoveResponse:
+ """
+        To move all listed agents to a given workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ agent_uuids: Agent uuids
+
+ body_workspace_uuid: Workspace uuid to move agents to
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}/agents",
+ body=await async_maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ agent_move_params.AgentMoveParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AgentMoveResponse,
+ )
+
+
+class AgentsResourceWithRawResponse:
+ def __init__(self, agents: AgentsResource) -> None:
+ self._agents = agents
+
+ self.list = to_raw_response_wrapper(
+ agents.list,
+ )
+ self.move = to_raw_response_wrapper(
+ agents.move,
+ )
+
+
+class AsyncAgentsResourceWithRawResponse:
+ def __init__(self, agents: AsyncAgentsResource) -> None:
+ self._agents = agents
+
+ self.list = async_to_raw_response_wrapper(
+ agents.list,
+ )
+ self.move = async_to_raw_response_wrapper(
+ agents.move,
+ )
+
+
+class AgentsResourceWithStreamingResponse:
+ def __init__(self, agents: AgentsResource) -> None:
+ self._agents = agents
+
+ self.list = to_streamed_response_wrapper(
+ agents.list,
+ )
+ self.move = to_streamed_response_wrapper(
+ agents.move,
+ )
+
+
+class AsyncAgentsResourceWithStreamingResponse:
+ def __init__(self, agents: AsyncAgentsResource) -> None:
+ self._agents = agents
+
+ self.list = async_to_streamed_response_wrapper(
+ agents.list,
+ )
+ self.move = async_to_streamed_response_wrapper(
+ agents.move,
+ )
diff --git a/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py
new file mode 100644
index 00000000..7c2be668
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py
@@ -0,0 +1,698 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .agents import (
+ AgentsResource,
+ AsyncAgentsResource,
+ AgentsResourceWithRawResponse,
+ AsyncAgentsResourceWithRawResponse,
+ AgentsResourceWithStreamingResponse,
+ AsyncAgentsResourceWithStreamingResponse,
+)
+from ....._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics import workspace_create_params, workspace_update_params
+from .....types.agents.evaluation_metrics.workspace_list_response import WorkspaceListResponse
+from .....types.agents.evaluation_metrics.workspace_create_response import WorkspaceCreateResponse
+from .....types.agents.evaluation_metrics.workspace_delete_response import WorkspaceDeleteResponse
+from .....types.agents.evaluation_metrics.workspace_update_response import WorkspaceUpdateResponse
+from .....types.agents.evaluation_metrics.workspace_retrieve_response import WorkspaceRetrieveResponse
+from .....types.agents.evaluation_metrics.workspace_list_evaluation_test_cases_response import (
+ WorkspaceListEvaluationTestCasesResponse,
+)
+
+__all__ = ["WorkspacesResource", "AsyncWorkspacesResource"]
+
+
+class WorkspacesResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def agents(self) -> AgentsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AgentsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> WorkspacesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return WorkspacesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> WorkspacesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return WorkspacesResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ agent_uuids: SequenceNotStr[str] | Omit = omit,
+ description: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceCreateResponse:
+ """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`.
+
+ The
+ response body contains a JSON object with the newly created workspace object.
+
+ Args:
+          agent_uuids: IDs of the agent(s) to attach to the workspace
+
+ description: Description of the workspace
+
+ name: Name of the workspace
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ body=maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "description": description,
+ "name": name,
+ },
+ workspace_create_params.WorkspaceCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceRetrieveResponse:
+ """
+        To retrieve details of a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_workspace_uuid: str,
+ *,
+ description: str | Omit = omit,
+ name: str | Omit = omit,
+ body_workspace_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceUpdateResponse:
+ """
+ To update a workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ description: The new description of the workspace
+
+ name: The new name of the workspace
+
+ body_workspace_uuid: Workspace UUID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}",
+ body=maybe_transform(
+ {
+ "description": description,
+ "name": name,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ workspace_update_params.WorkspaceUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceListResponse:
+ """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`."""
+ return self._get(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListResponse,
+ )
+
+ def delete(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceDeleteResponse:
+ """
+ To delete a workspace, send a DELETE request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceDeleteResponse,
+ )
+
+ def list_evaluation_test_cases(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceListEvaluationTestCasesResponse:
+ """
+ To list all evaluation test cases by a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListEvaluationTestCasesResponse,
+ )
+
+
+class AsyncWorkspacesResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def agents(self) -> AsyncAgentsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAgentsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncWorkspacesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncWorkspacesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncWorkspacesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncWorkspacesResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ agent_uuids: SequenceNotStr[str] | Omit = omit,
+ description: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceCreateResponse:
+ """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`.
+
+ The
+ response body contains a JSON object with the newly created workspace object.
+
+ Args:
+          agent_uuids: IDs of the agent(s) to attach to the workspace
+
+ description: Description of the workspace
+
+ name: Name of the workspace
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ body=await async_maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "description": description,
+ "name": name,
+ },
+ workspace_create_params.WorkspaceCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceRetrieveResponse:
+ """
+        To retrieve details of a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_workspace_uuid: str,
+ *,
+ description: str | Omit = omit,
+ name: str | Omit = omit,
+ body_workspace_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceUpdateResponse:
+ """
+ To update a workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ description: The new description of the workspace
+
+ name: The new name of the workspace
+
+ body_workspace_uuid: Workspace UUID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}",
+ body=await async_maybe_transform(
+ {
+ "description": description,
+ "name": name,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ workspace_update_params.WorkspaceUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceListResponse:
+ """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`."""
+ return await self._get(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListResponse,
+ )
+
+ async def delete(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceDeleteResponse:
+ """
+ To delete a workspace, send a DELETE request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceDeleteResponse,
+ )
+
+ async def list_evaluation_test_cases(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> WorkspaceListEvaluationTestCasesResponse:
+ """
+ To list all evaluation test cases by a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListEvaluationTestCasesResponse,
+ )
+
+
+class WorkspacesResourceWithRawResponse:
+ def __init__(self, workspaces: WorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = to_raw_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ workspaces.update,
+ )
+ self.list = to_raw_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = to_raw_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AgentsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AgentsResourceWithRawResponse(self._workspaces.agents)
+
+
+class AsyncWorkspacesResourceWithRawResponse:
+ def __init__(self, workspaces: AsyncWorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = async_to_raw_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ workspaces.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = async_to_raw_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AsyncAgentsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAgentsResourceWithRawResponse(self._workspaces.agents)
+
+
+class WorkspacesResourceWithStreamingResponse:
+ def __init__(self, workspaces: WorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = to_streamed_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ workspaces.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = to_streamed_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AgentsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AgentsResourceWithStreamingResponse(self._workspaces.agents)
+
+
+class AsyncWorkspacesResourceWithStreamingResponse:
+ def __init__(self, workspaces: AsyncWorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = async_to_streamed_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ workspaces.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = async_to_streamed_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AsyncAgentsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAgentsResourceWithStreamingResponse(self._workspaces.agents)
diff --git a/src/gradient/resources/agents/evaluation_runs.py b/src/gradient/resources/agents/evaluation_runs.py
new file mode 100644
index 00000000..50d51156
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_runs.py
@@ -0,0 +1,516 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents import evaluation_run_create_params, evaluation_run_list_results_params
+from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse
+from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse
+from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse
+from ...types.agents.evaluation_run_retrieve_results_response import EvaluationRunRetrieveResultsResponse
+
+__all__ = ["EvaluationRunsResource", "AsyncEvaluationRunsResource"]
+
+
+class EvaluationRunsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> EvaluationRunsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return EvaluationRunsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return EvaluationRunsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ agent_deployment_names: SequenceNotStr[str] | Omit = omit,
+ agent_uuids: SequenceNotStr[str] | Omit = omit,
+ run_name: str | Omit = omit,
+ test_case_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunCreateResponse:
+ """
+ To run an evaluation test case, send a POST request to
+ `/v2/gen-ai/evaluation_runs`.
+
+ Args:
+ agent_deployment_names: Agent deployment names to run the test case against (ADK agent workspaces).
+
+ agent_uuids: Agent UUIDs to run the test case against (legacy agents).
+
+ run_name: The name of the run.
+
+ test_case_uuid: Test-case UUID to run
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/evaluation_runs"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs",
+ body=maybe_transform(
+ {
+ "agent_deployment_names": agent_deployment_names,
+ "agent_uuids": agent_uuids,
+ "run_name": run_name,
+ "test_case_uuid": test_case_uuid,
+ },
+ evaluation_run_create_params.EvaluationRunCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ evaluation_run_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunRetrieveResponse:
+ """
+        To retrieve information about an existing evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunRetrieveResponse,
+ )
+
+ def list_results(
+ self,
+ evaluation_run_uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunListResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ evaluation_run_list_results_params.EvaluationRunListResultsParams,
+ ),
+ ),
+ cast_to=EvaluationRunListResultsResponse,
+ )
+
+ def retrieve_results(
+ self,
+ prompt_id: int,
+ *,
+ evaluation_run_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunRetrieveResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunRetrieveResultsResponse,
+ )
+
+
+class AsyncEvaluationRunsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncEvaluationRunsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncEvaluationRunsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncEvaluationRunsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ agent_deployment_names: SequenceNotStr[str] | Omit = omit,
+ agent_uuids: SequenceNotStr[str] | Omit = omit,
+ run_name: str | Omit = omit,
+ test_case_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunCreateResponse:
+ """
+ To run an evaluation test case, send a POST request to
+ `/v2/gen-ai/evaluation_runs`.
+
+ Args:
+ agent_deployment_names: Agent deployment names to run the test case against (ADK agent workspaces).
+
+ agent_uuids: Agent UUIDs to run the test case against (legacy agents).
+
+ run_name: The name of the run.
+
+ test_case_uuid: Test-case UUID to run
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/evaluation_runs"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs",
+ body=await async_maybe_transform(
+ {
+ "agent_deployment_names": agent_deployment_names,
+ "agent_uuids": agent_uuids,
+ "run_name": run_name,
+ "test_case_uuid": test_case_uuid,
+ },
+ evaluation_run_create_params.EvaluationRunCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ evaluation_run_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunRetrieveResponse:
+ """
+        To retrieve information about an existing evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunRetrieveResponse,
+ )
+
+ async def list_results(
+ self,
+ evaluation_run_uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunListResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ evaluation_run_list_results_params.EvaluationRunListResultsParams,
+ ),
+ ),
+ cast_to=EvaluationRunListResultsResponse,
+ )
+
+ async def retrieve_results(
+ self,
+ prompt_id: int,
+ *,
+ evaluation_run_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationRunRetrieveResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunRetrieveResultsResponse,
+ )
+
+
+class EvaluationRunsResourceWithRawResponse:
+ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
+ self._evaluation_runs = evaluation_runs
+
+ self.create = to_raw_response_wrapper(
+ evaluation_runs.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ evaluation_runs.retrieve,
+ )
+ self.list_results = to_raw_response_wrapper(
+ evaluation_runs.list_results,
+ )
+ self.retrieve_results = to_raw_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
+
+
+class AsyncEvaluationRunsResourceWithRawResponse:
+ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
+ self._evaluation_runs = evaluation_runs
+
+ self.create = async_to_raw_response_wrapper(
+ evaluation_runs.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ evaluation_runs.retrieve,
+ )
+ self.list_results = async_to_raw_response_wrapper(
+ evaluation_runs.list_results,
+ )
+ self.retrieve_results = async_to_raw_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
+
+
+class EvaluationRunsResourceWithStreamingResponse:
+ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
+ self._evaluation_runs = evaluation_runs
+
+ self.create = to_streamed_response_wrapper(
+ evaluation_runs.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ evaluation_runs.retrieve,
+ )
+ self.list_results = to_streamed_response_wrapper(
+ evaluation_runs.list_results,
+ )
+ self.retrieve_results = to_streamed_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
+
+
+class AsyncEvaluationRunsResourceWithStreamingResponse:
+ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
+ self._evaluation_runs = evaluation_runs
+
+ self.create = async_to_streamed_response_wrapper(
+ evaluation_runs.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ evaluation_runs.retrieve,
+ )
+ self.list_results = async_to_streamed_response_wrapper(
+ evaluation_runs.list_results,
+ )
+ self.retrieve_results = async_to_streamed_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
diff --git a/src/gradient/resources/agents/evaluation_test_cases.py b/src/gradient/resources/agents/evaluation_test_cases.py
new file mode 100644
index 00000000..cb47155d
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_test_cases.py
@@ -0,0 +1,653 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents import (
+ evaluation_test_case_create_params,
+ evaluation_test_case_update_params,
+ evaluation_test_case_retrieve_params,
+ evaluation_test_case_list_evaluation_runs_params,
+)
+from ...types.agents.api_star_metric_param import APIStarMetricParam
+from ...types.agents.evaluation_test_case_list_response import EvaluationTestCaseListResponse
+from ...types.agents.evaluation_test_case_create_response import EvaluationTestCaseCreateResponse
+from ...types.agents.evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse
+from ...types.agents.evaluation_test_case_retrieve_response import EvaluationTestCaseRetrieveResponse
+from ...types.agents.evaluation_test_case_list_evaluation_runs_response import (
+ EvaluationTestCaseListEvaluationRunsResponse,
+)
+
+__all__ = ["EvaluationTestCasesResource", "AsyncEvaluationTestCasesResource"]
+
+
+class EvaluationTestCasesResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> EvaluationTestCasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return EvaluationTestCasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> EvaluationTestCasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return EvaluationTestCasesResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ agent_workspace_name: str | Omit = omit,
+ dataset_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ metrics: SequenceNotStr[str] | Omit = omit,
+ name: str | Omit = omit,
+ star_metric: APIStarMetricParam | Omit = omit,
+ workspace_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseCreateResponse:
+ """
+ To create an evaluation test-case send a POST request to
+ `/v2/gen-ai/evaluation_test_cases`.
+
+ Args:
+          dataset_uuid: Dataset against which the test-case is executed.
+
+ description: Description of the test case.
+
+ metrics: Full metric list to use for evaluation test case.
+
+ name: Name of the test case.
+
+ workspace_uuid: The workspace uuid.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases",
+ body=maybe_transform(
+ {
+ "agent_workspace_name": agent_workspace_name,
+ "dataset_uuid": dataset_uuid,
+ "description": description,
+ "metrics": metrics,
+ "name": name,
+ "star_metric": star_metric,
+ "workspace_uuid": workspace_uuid,
+ },
+ evaluation_test_case_create_params.EvaluationTestCaseCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationTestCaseCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ test_case_uuid: str,
+ *,
+ evaluation_test_case_version: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseRetrieveResponse:
+ """
+        To retrieve information about an existing evaluation test case, send a GET
+        request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`.
+
+ Args:
+ evaluation_test_case_version: Version of the test case.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not test_case_uuid:
+ raise ValueError(f"Expected a non-empty value for `test_case_uuid` but received {test_case_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/evaluation_test_cases/{test_case_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {"evaluation_test_case_version": evaluation_test_case_version},
+ evaluation_test_case_retrieve_params.EvaluationTestCaseRetrieveParams,
+ ),
+ ),
+ cast_to=EvaluationTestCaseRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_test_case_uuid: str,
+ *,
+ dataset_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ metrics: evaluation_test_case_update_params.Metrics | Omit = omit,
+ name: str | Omit = omit,
+ star_metric: APIStarMetricParam | Omit = omit,
+ body_test_case_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseUpdateResponse:
+ """
+ To update an evaluation test-case send a PUT request to
+ `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`.
+
+ Args:
+          dataset_uuid: Dataset against which the test-case is executed.
+
+ description: Description of the test case.
+
+ name: Name of the test case.
+
+ body_test_case_uuid: Test-case UUID to update
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_test_case_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}"
+ )
+ return self._put(
+ f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}",
+ body=maybe_transform(
+ {
+ "dataset_uuid": dataset_uuid,
+ "description": description,
+ "metrics": metrics,
+ "name": name,
+ "star_metric": star_metric,
+ "body_test_case_uuid": body_test_case_uuid,
+ },
+ evaluation_test_case_update_params.EvaluationTestCaseUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationTestCaseUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseListResponse:
+ """
+ To list all evaluation test cases, send a GET request to
+ `/v2/gen-ai/evaluation_test_cases`.
+ """
+ return self._get(
+ "/v2/gen-ai/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationTestCaseListResponse,
+ )
+
+ def list_evaluation_runs(
+ self,
+ evaluation_test_case_uuid: str,
+ *,
+ evaluation_test_case_version: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseListEvaluationRunsResponse:
+ """
+ To list all evaluation runs by test case, send a GET request to
+ `/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs`.
+
+ Args:
+ evaluation_test_case_version: Version of the test case.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_test_case_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_test_case_uuid` but received {evaluation_test_case_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {"evaluation_test_case_version": evaluation_test_case_version},
+ evaluation_test_case_list_evaluation_runs_params.EvaluationTestCaseListEvaluationRunsParams,
+ ),
+ ),
+ cast_to=EvaluationTestCaseListEvaluationRunsResponse,
+ )
+
+
+class AsyncEvaluationTestCasesResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncEvaluationTestCasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncEvaluationTestCasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncEvaluationTestCasesResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ agent_workspace_name: str | Omit = omit,
+ dataset_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ metrics: SequenceNotStr[str] | Omit = omit,
+ name: str | Omit = omit,
+ star_metric: APIStarMetricParam | Omit = omit,
+ workspace_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseCreateResponse:
+ """
+ To create an evaluation test-case send a POST request to
+ `/v2/gen-ai/evaluation_test_cases`.
+
+ Args:
+          dataset_uuid: Dataset against which the test-case is executed.
+
+ description: Description of the test case.
+
+ metrics: Full metric list to use for evaluation test case.
+
+ name: Name of the test case.
+
+ workspace_uuid: The workspace uuid.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases",
+ body=await async_maybe_transform(
+ {
+ "agent_workspace_name": agent_workspace_name,
+ "dataset_uuid": dataset_uuid,
+ "description": description,
+ "metrics": metrics,
+ "name": name,
+ "star_metric": star_metric,
+ "workspace_uuid": workspace_uuid,
+ },
+ evaluation_test_case_create_params.EvaluationTestCaseCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationTestCaseCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ test_case_uuid: str,
+ *,
+ evaluation_test_case_version: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseRetrieveResponse:
+ """
+        To retrieve information about an existing evaluation test case, send a GET
+ request to `/v2/gen-ai/evaluation_test_case/{test_case_uuid}`.
+
+ Args:
+ evaluation_test_case_version: Version of the test case.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not test_case_uuid:
+ raise ValueError(f"Expected a non-empty value for `test_case_uuid` but received {test_case_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/evaluation_test_cases/{test_case_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"evaluation_test_case_version": evaluation_test_case_version},
+ evaluation_test_case_retrieve_params.EvaluationTestCaseRetrieveParams,
+ ),
+ ),
+ cast_to=EvaluationTestCaseRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_test_case_uuid: str,
+ *,
+ dataset_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ metrics: evaluation_test_case_update_params.Metrics | Omit = omit,
+ name: str | Omit = omit,
+ star_metric: APIStarMetricParam | Omit = omit,
+ body_test_case_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseUpdateResponse:
+ """
+ To update an evaluation test-case send a PUT request to
+ `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`.
+
+ Args:
+          dataset_uuid: Dataset against which the test-case is executed.
+
+ description: Description of the test case.
+
+ name: Name of the test case.
+
+ body_test_case_uuid: Test-case UUID to update
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_test_case_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}",
+ body=await async_maybe_transform(
+ {
+ "dataset_uuid": dataset_uuid,
+ "description": description,
+ "metrics": metrics,
+ "name": name,
+ "star_metric": star_metric,
+ "body_test_case_uuid": body_test_case_uuid,
+ },
+ evaluation_test_case_update_params.EvaluationTestCaseUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationTestCaseUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseListResponse:
+ """
+ To list all evaluation test cases, send a GET request to
+ `/v2/gen-ai/evaluation_test_cases`.
+ """
+ return await self._get(
+ "/v2/gen-ai/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationTestCaseListResponse,
+ )
+
+ async def list_evaluation_runs(
+ self,
+ evaluation_test_case_uuid: str,
+ *,
+ evaluation_test_case_version: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> EvaluationTestCaseListEvaluationRunsResponse:
+ """
+ To list all evaluation runs by test case, send a GET request to
+ `/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs`.
+
+ Args:
+ evaluation_test_case_version: Version of the test case.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_test_case_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_test_case_uuid` but received {evaluation_test_case_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"evaluation_test_case_version": evaluation_test_case_version},
+ evaluation_test_case_list_evaluation_runs_params.EvaluationTestCaseListEvaluationRunsParams,
+ ),
+ ),
+ cast_to=EvaluationTestCaseListEvaluationRunsResponse,
+ )
+
+
+class EvaluationTestCasesResourceWithRawResponse:
+ def __init__(self, evaluation_test_cases: EvaluationTestCasesResource) -> None:
+ self._evaluation_test_cases = evaluation_test_cases
+
+ self.create = to_raw_response_wrapper(
+ evaluation_test_cases.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ evaluation_test_cases.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ evaluation_test_cases.update,
+ )
+ self.list = to_raw_response_wrapper(
+ evaluation_test_cases.list,
+ )
+ self.list_evaluation_runs = to_raw_response_wrapper(
+ evaluation_test_cases.list_evaluation_runs,
+ )
+
+
+class AsyncEvaluationTestCasesResourceWithRawResponse:
+ def __init__(self, evaluation_test_cases: AsyncEvaluationTestCasesResource) -> None:
+ self._evaluation_test_cases = evaluation_test_cases
+
+ self.create = async_to_raw_response_wrapper(
+ evaluation_test_cases.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ evaluation_test_cases.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ evaluation_test_cases.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ evaluation_test_cases.list,
+ )
+ self.list_evaluation_runs = async_to_raw_response_wrapper(
+ evaluation_test_cases.list_evaluation_runs,
+ )
+
+
+class EvaluationTestCasesResourceWithStreamingResponse:
+ def __init__(self, evaluation_test_cases: EvaluationTestCasesResource) -> None:
+ self._evaluation_test_cases = evaluation_test_cases
+
+ self.create = to_streamed_response_wrapper(
+ evaluation_test_cases.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ evaluation_test_cases.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ evaluation_test_cases.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ evaluation_test_cases.list,
+ )
+ self.list_evaluation_runs = to_streamed_response_wrapper(
+ evaluation_test_cases.list_evaluation_runs,
+ )
+
+
+class AsyncEvaluationTestCasesResourceWithStreamingResponse:
+ def __init__(self, evaluation_test_cases: AsyncEvaluationTestCasesResource) -> None:
+ self._evaluation_test_cases = evaluation_test_cases
+
+ self.create = async_to_streamed_response_wrapper(
+ evaluation_test_cases.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ evaluation_test_cases.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ evaluation_test_cases.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ evaluation_test_cases.list,
+ )
+ self.list_evaluation_runs = async_to_streamed_response_wrapper(
+ evaluation_test_cases.list_evaluation_runs,
+ )
diff --git a/src/gradient/resources/agents/functions.py b/src/gradient/resources/agents/functions.py
new file mode 100644
index 00000000..fc58d899
--- /dev/null
+++ b/src/gradient/resources/agents/functions.py
@@ -0,0 +1,501 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents import function_create_params, function_update_params
+from ...types.agents.function_create_response import FunctionCreateResponse
+from ...types.agents.function_delete_response import FunctionDeleteResponse
+from ...types.agents.function_update_response import FunctionUpdateResponse
+
+__all__ = ["FunctionsResource", "AsyncFunctionsResource"]
+
+
+class FunctionsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> FunctionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return FunctionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return FunctionsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ path_agent_uuid: str,
+ *,
+ body_agent_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ faas_name: str | Omit = omit,
+ faas_namespace: str | Omit = omit,
+ function_name: str | Omit = omit,
+ input_schema: object | Omit = omit,
+ output_schema: object | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FunctionCreateResponse:
+ """
+ To create a function route for an agent, send a POST request to
+ `/v2/gen-ai/agents/{agent_uuid}/functions`.
+
+ Args:
+ body_agent_uuid: Agent id
+
+ description: Function description
+
+ faas_name: The name of the function in the DigitalOcean functions platform
+
+ faas_namespace: The namespace of the function in the DigitalOcean functions platform
+
+ function_name: Function name
+
+ input_schema: Describe the input schema for the function so the agent may call it
+
+          output_schema: Describe the output schema for the function so the agent can handle its response
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+ return self._post(
+ f"/v2/gen-ai/agents/{path_agent_uuid}/functions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions",
+ body=maybe_transform(
+ {
+ "body_agent_uuid": body_agent_uuid,
+ "description": description,
+ "faas_name": faas_name,
+ "faas_namespace": faas_namespace,
+ "function_name": function_name,
+ "input_schema": input_schema,
+ "output_schema": output_schema,
+ },
+ function_create_params.FunctionCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FunctionCreateResponse,
+ )
+
+ def update(
+ self,
+ path_function_uuid: str,
+ *,
+ path_agent_uuid: str,
+ body_agent_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ faas_name: str | Omit = omit,
+ faas_namespace: str | Omit = omit,
+ function_name: str | Omit = omit,
+ body_function_uuid: str | Omit = omit,
+ input_schema: object | Omit = omit,
+ output_schema: object | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FunctionUpdateResponse:
+ """
+ To update the function route, send a PUT request to
+ `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`.
+
+ Args:
+ body_agent_uuid: Agent id
+
+          description: Function description
+
+ faas_name: The name of the function in the DigitalOcean functions platform
+
+ faas_namespace: The namespace of the function in the DigitalOcean functions platform
+
+ function_name: Function name
+
+ body_function_uuid: Function id
+
+ input_schema: Describe the input schema for the function so the agent may call it
+
+          output_schema: Describe the output schema for the function so the agent can handle its response
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+ if not path_function_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}",
+ body=maybe_transform(
+ {
+ "body_agent_uuid": body_agent_uuid,
+ "description": description,
+ "faas_name": faas_name,
+ "faas_namespace": faas_namespace,
+ "function_name": function_name,
+ "body_function_uuid": body_function_uuid,
+ "input_schema": input_schema,
+ "output_schema": output_schema,
+ },
+ function_update_params.FunctionUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FunctionUpdateResponse,
+ )
+
+ def delete(
+ self,
+ function_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FunctionDeleteResponse:
+ """
+ To delete a function route from an agent, send a DELETE request to
+ `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not function_uuid:
+ raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FunctionDeleteResponse,
+ )
+
+
+class AsyncFunctionsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFunctionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncFunctionsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ path_agent_uuid: str,
+ *,
+ body_agent_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ faas_name: str | Omit = omit,
+ faas_namespace: str | Omit = omit,
+ function_name: str | Omit = omit,
+ input_schema: object | Omit = omit,
+ output_schema: object | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FunctionCreateResponse:
+ """
+ To create a function route for an agent, send a POST request to
+ `/v2/gen-ai/agents/{agent_uuid}/functions`.
+
+ Args:
+ body_agent_uuid: Agent id
+
+ description: Function description
+
+ faas_name: The name of the function in the DigitalOcean functions platform
+
+ faas_namespace: The namespace of the function in the DigitalOcean functions platform
+
+ function_name: Function name
+
+ input_schema: Describe the input schema for the function so the agent may call it
+
+          output_schema: Describe the output schema for the function so the agent can handle its response
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+ return await self._post(
+ f"/v2/gen-ai/agents/{path_agent_uuid}/functions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions",
+ body=await async_maybe_transform(
+ {
+ "body_agent_uuid": body_agent_uuid,
+ "description": description,
+ "faas_name": faas_name,
+ "faas_namespace": faas_namespace,
+ "function_name": function_name,
+ "input_schema": input_schema,
+ "output_schema": output_schema,
+ },
+ function_create_params.FunctionCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FunctionCreateResponse,
+ )
+
+ async def update(
+ self,
+ path_function_uuid: str,
+ *,
+ path_agent_uuid: str,
+ body_agent_uuid: str | Omit = omit,
+ description: str | Omit = omit,
+ faas_name: str | Omit = omit,
+ faas_namespace: str | Omit = omit,
+ function_name: str | Omit = omit,
+ body_function_uuid: str | Omit = omit,
+ input_schema: object | Omit = omit,
+ output_schema: object | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FunctionUpdateResponse:
+ """
+ To update the function route, send a PUT request to
+ `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`.
+
+ Args:
+ body_agent_uuid: Agent id
+
+          description: Function description
+
+ faas_name: The name of the function in the DigitalOcean functions platform
+
+ faas_namespace: The namespace of the function in the DigitalOcean functions platform
+
+ function_name: Function name
+
+ body_function_uuid: Function id
+
+ input_schema: Describe the input schema for the function so the agent may call it
+
+          output_schema: Describe the output schema for the function so the agent can handle its response
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+ if not path_function_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}",
+ body=await async_maybe_transform(
+ {
+ "body_agent_uuid": body_agent_uuid,
+ "description": description,
+ "faas_name": faas_name,
+ "faas_namespace": faas_namespace,
+ "function_name": function_name,
+ "body_function_uuid": body_function_uuid,
+ "input_schema": input_schema,
+ "output_schema": output_schema,
+ },
+ function_update_params.FunctionUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FunctionUpdateResponse,
+ )
+
+ async def delete(
+ self,
+ function_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FunctionDeleteResponse:
+ """
+ To delete a function route from an agent, send a DELETE request to
+ `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not function_uuid:
+ raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FunctionDeleteResponse,
+ )
+
+
+class FunctionsResourceWithRawResponse:
+ def __init__(self, functions: FunctionsResource) -> None:
+ self._functions = functions
+
+ self.create = to_raw_response_wrapper(
+ functions.create,
+ )
+ self.update = to_raw_response_wrapper(
+ functions.update,
+ )
+ self.delete = to_raw_response_wrapper(
+ functions.delete,
+ )
+
+
+class AsyncFunctionsResourceWithRawResponse:
+ def __init__(self, functions: AsyncFunctionsResource) -> None:
+ self._functions = functions
+
+ self.create = async_to_raw_response_wrapper(
+ functions.create,
+ )
+ self.update = async_to_raw_response_wrapper(
+ functions.update,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ functions.delete,
+ )
+
+
+class FunctionsResourceWithStreamingResponse:
+ def __init__(self, functions: FunctionsResource) -> None:
+ self._functions = functions
+
+ self.create = to_streamed_response_wrapper(
+ functions.create,
+ )
+ self.update = to_streamed_response_wrapper(
+ functions.update,
+ )
+ self.delete = to_streamed_response_wrapper(
+ functions.delete,
+ )
+
+
+class AsyncFunctionsResourceWithStreamingResponse:
+ def __init__(self, functions: AsyncFunctionsResource) -> None:
+ self._functions = functions
+
+ self.create = async_to_streamed_response_wrapper(
+ functions.create,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ functions.update,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ functions.delete,
+ )
diff --git a/src/gradient/resources/agents/knowledge_bases.py b/src/gradient/resources/agents/knowledge_bases.py
new file mode 100644
index 00000000..57268294
--- /dev/null
+++ b/src/gradient/resources/agents/knowledge_bases.py
@@ -0,0 +1,366 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Query, Headers, NotGiven, not_given
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput
+from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse
+
+__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"]
+
+
+class KnowledgeBasesResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return KnowledgeBasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return KnowledgeBasesResourceWithStreamingResponse(self)
+
+ def attach(
+ self,
+ agent_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APILinkKnowledgeBaseOutput:
+ """
+ To attach knowledge bases to an agent, send a POST request to
+ `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases`
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ return self._post(
+ f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APILinkKnowledgeBaseOutput,
+ )
+
+ def attach_single(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APILinkKnowledgeBaseOutput:
+ """
+ To attach a knowledge base to an agent, send a POST request to
+ `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return self._post(
+ f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APILinkKnowledgeBaseOutput,
+ )
+
+ def detach(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KnowledgeBaseDetachResponse:
+ """
+ To detach a knowledge base from an agent, send a DELETE request to
+ `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return self._delete(
+ f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KnowledgeBaseDetachResponse,
+ )
+
+
+class AsyncKnowledgeBasesResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncKnowledgeBasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncKnowledgeBasesResourceWithStreamingResponse(self)
+
+ async def attach(
+ self,
+ agent_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APILinkKnowledgeBaseOutput:
+ """
+ To attach knowledge bases to an agent, send a POST request to
+ `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases`
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ return await self._post(
+ f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APILinkKnowledgeBaseOutput,
+ )
+
+ async def attach_single(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APILinkKnowledgeBaseOutput:
+ """
+ To attach a knowledge base to an agent, send a POST request to
+ `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return await self._post(
+ f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APILinkKnowledgeBaseOutput,
+ )
+
+ async def detach(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KnowledgeBaseDetachResponse:
+ """
+ To detach a knowledge base from an agent, send a DELETE request to
+ `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return await self._delete(
+ f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KnowledgeBaseDetachResponse,
+ )
+
+
+class KnowledgeBasesResourceWithRawResponse:
+ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
+ self._knowledge_bases = knowledge_bases
+
+ self.attach = to_raw_response_wrapper(
+ knowledge_bases.attach,
+ )
+ self.attach_single = to_raw_response_wrapper(
+ knowledge_bases.attach_single,
+ )
+ self.detach = to_raw_response_wrapper(
+ knowledge_bases.detach,
+ )
+
+
+class AsyncKnowledgeBasesResourceWithRawResponse:
+ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
+ self._knowledge_bases = knowledge_bases
+
+ self.attach = async_to_raw_response_wrapper(
+ knowledge_bases.attach,
+ )
+ self.attach_single = async_to_raw_response_wrapper(
+ knowledge_bases.attach_single,
+ )
+ self.detach = async_to_raw_response_wrapper(
+ knowledge_bases.detach,
+ )
+
+
+class KnowledgeBasesResourceWithStreamingResponse:
+ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
+ self._knowledge_bases = knowledge_bases
+
+ self.attach = to_streamed_response_wrapper(
+ knowledge_bases.attach,
+ )
+ self.attach_single = to_streamed_response_wrapper(
+ knowledge_bases.attach_single,
+ )
+ self.detach = to_streamed_response_wrapper(
+ knowledge_bases.detach,
+ )
+
+
+class AsyncKnowledgeBasesResourceWithStreamingResponse:
+ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
+ self._knowledge_bases = knowledge_bases
+
+ self.attach = async_to_streamed_response_wrapper(
+ knowledge_bases.attach,
+ )
+ self.attach_single = async_to_streamed_response_wrapper(
+ knowledge_bases.attach_single,
+ )
+ self.detach = async_to_streamed_response_wrapper(
+ knowledge_bases.detach,
+ )
diff --git a/src/gradient/resources/agents/routes.py b/src/gradient/resources/agents/routes.py
new file mode 100644
index 00000000..3f80e8e8
--- /dev/null
+++ b/src/gradient/resources/agents/routes.py
@@ -0,0 +1,556 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents import route_add_params, route_update_params
+from ...types.agents.route_add_response import RouteAddResponse
+from ...types.agents.route_view_response import RouteViewResponse
+from ...types.agents.route_delete_response import RouteDeleteResponse
+from ...types.agents.route_update_response import RouteUpdateResponse
+
+__all__ = ["RoutesResource", "AsyncRoutesResource"]
+
+
+class RoutesResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> RoutesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return RoutesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RoutesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return RoutesResourceWithStreamingResponse(self)
+
+ def update(
+ self,
+ path_child_agent_uuid: str,
+ *,
+ path_parent_agent_uuid: str,
+ body_child_agent_uuid: str | Omit = omit,
+ if_case: str | Omit = omit,
+ body_parent_agent_uuid: str | Omit = omit,
+ route_name: str | Omit = omit,
+ uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteUpdateResponse:
+ """
+ To update an agent route for an agent, send a PUT request to
+ `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
+
+ Args:
+ body_child_agent_uuid: Routed agent id
+
+ if_case: Describes the case in which the child agent should be used
+
+ body_parent_agent_uuid: A unique identifier for the parent agent.
+
+ route_name: Route name
+
+ uuid: Unique id of linkage
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_parent_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}"
+ )
+ if not path_child_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
+ )
+ return self._put(
+ f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+ body=maybe_transform(
+ {
+ "body_child_agent_uuid": body_child_agent_uuid,
+ "if_case": if_case,
+ "body_parent_agent_uuid": body_parent_agent_uuid,
+ "route_name": route_name,
+ "uuid": uuid,
+ },
+ route_update_params.RouteUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteUpdateResponse,
+ )
+
+ def delete(
+ self,
+ child_agent_uuid: str,
+ *,
+ parent_agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteDeleteResponse:
+ """
+ To delete an agent route from a parent agent, send a DELETE request to
+ `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not parent_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}")
+ if not child_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteDeleteResponse,
+ )
+
+ def add(
+ self,
+ path_child_agent_uuid: str,
+ *,
+ path_parent_agent_uuid: str,
+ body_child_agent_uuid: str | Omit = omit,
+ if_case: str | Omit = omit,
+ body_parent_agent_uuid: str | Omit = omit,
+ route_name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteAddResponse:
+ """
+ To add an agent route to an agent, send a POST request to
+ `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
+
+ Args:
+ body_child_agent_uuid: Routed agent id
+
+ body_parent_agent_uuid: A unique identifier for the parent agent.
+
+ route_name: Name of route
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_parent_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}"
+ )
+ if not path_child_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
+ )
+ return self._post(
+ f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+ body=maybe_transform(
+ {
+ "body_child_agent_uuid": body_child_agent_uuid,
+ "if_case": if_case,
+ "body_parent_agent_uuid": body_parent_agent_uuid,
+ "route_name": route_name,
+ },
+ route_add_params.RouteAddParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteAddResponse,
+ )
+
+ def view(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteViewResponse:
+ """
+        To view agent routes for an agent, send a GET request to
+ `/v2/gen-ai/agents/{uuid}/child_agents`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/agents/{uuid}/child_agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/child_agents",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteViewResponse,
+ )
+
+
+class AsyncRoutesResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncRoutesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRoutesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRoutesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncRoutesResourceWithStreamingResponse(self)
+
+ async def update(
+ self,
+ path_child_agent_uuid: str,
+ *,
+ path_parent_agent_uuid: str,
+ body_child_agent_uuid: str | Omit = omit,
+ if_case: str | Omit = omit,
+ body_parent_agent_uuid: str | Omit = omit,
+ route_name: str | Omit = omit,
+ uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteUpdateResponse:
+ """
+ To update an agent route for an agent, send a PUT request to
+ `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
+
+ Args:
+ body_child_agent_uuid: Routed agent id
+
+ if_case: Describes the case in which the child agent should be used
+
+ body_parent_agent_uuid: A unique identifier for the parent agent.
+
+ route_name: Route name
+
+ uuid: Unique id of linkage
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_parent_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}"
+ )
+ if not path_child_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+ body=await async_maybe_transform(
+ {
+ "body_child_agent_uuid": body_child_agent_uuid,
+ "if_case": if_case,
+ "body_parent_agent_uuid": body_parent_agent_uuid,
+ "route_name": route_name,
+ "uuid": uuid,
+ },
+ route_update_params.RouteUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteUpdateResponse,
+ )
+
+ async def delete(
+ self,
+ child_agent_uuid: str,
+ *,
+ parent_agent_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteDeleteResponse:
+ """
+ To delete an agent route from a parent agent, send a DELETE request to
+ `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not parent_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}")
+ if not child_agent_uuid:
+ raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteDeleteResponse,
+ )
+
+ async def add(
+ self,
+ path_child_agent_uuid: str,
+ *,
+ path_parent_agent_uuid: str,
+ body_child_agent_uuid: str | Omit = omit,
+ if_case: str | Omit = omit,
+ body_parent_agent_uuid: str | Omit = omit,
+ route_name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteAddResponse:
+ """
+ To add an agent route to an agent, send a POST request to
+ `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
+
+ Args:
+ body_child_agent_uuid: Routed agent id
+
+ body_parent_agent_uuid: A unique identifier for the parent agent.
+
+ route_name: Name of route
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_parent_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}"
+ )
+ if not path_child_agent_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
+ )
+ return await self._post(
+ f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+ body=await async_maybe_transform(
+ {
+ "body_child_agent_uuid": body_child_agent_uuid,
+ "if_case": if_case,
+ "body_parent_agent_uuid": body_parent_agent_uuid,
+ "route_name": route_name,
+ },
+ route_add_params.RouteAddParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteAddResponse,
+ )
+
+ async def view(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RouteViewResponse:
+ """
+        To view agent routes for an agent, send a GET request to
+ `/v2/gen-ai/agents/{uuid}/child_agents`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/agents/{uuid}/child_agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/child_agents",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=RouteViewResponse,
+ )
+
+
+class RoutesResourceWithRawResponse:
+ def __init__(self, routes: RoutesResource) -> None:
+ self._routes = routes
+
+ self.update = to_raw_response_wrapper(
+ routes.update,
+ )
+ self.delete = to_raw_response_wrapper(
+ routes.delete,
+ )
+ self.add = to_raw_response_wrapper(
+ routes.add,
+ )
+ self.view = to_raw_response_wrapper(
+ routes.view,
+ )
+
+
+class AsyncRoutesResourceWithRawResponse:
+ def __init__(self, routes: AsyncRoutesResource) -> None:
+ self._routes = routes
+
+ self.update = async_to_raw_response_wrapper(
+ routes.update,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ routes.delete,
+ )
+ self.add = async_to_raw_response_wrapper(
+ routes.add,
+ )
+ self.view = async_to_raw_response_wrapper(
+ routes.view,
+ )
+
+
+class RoutesResourceWithStreamingResponse:
+ def __init__(self, routes: RoutesResource) -> None:
+ self._routes = routes
+
+ self.update = to_streamed_response_wrapper(
+ routes.update,
+ )
+ self.delete = to_streamed_response_wrapper(
+ routes.delete,
+ )
+ self.add = to_streamed_response_wrapper(
+ routes.add,
+ )
+ self.view = to_streamed_response_wrapper(
+ routes.view,
+ )
+
+
+class AsyncRoutesResourceWithStreamingResponse:
+ def __init__(self, routes: AsyncRoutesResource) -> None:
+ self._routes = routes
+
+ self.update = async_to_streamed_response_wrapper(
+ routes.update,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ routes.delete,
+ )
+ self.add = async_to_streamed_response_wrapper(
+ routes.add,
+ )
+ self.view = async_to_streamed_response_wrapper(
+ routes.view,
+ )
diff --git a/src/gradient/resources/agents/versions.py b/src/gradient/resources/agents/versions.py
new file mode 100644
index 00000000..90b55087
--- /dev/null
+++ b/src/gradient/resources/agents/versions.py
@@ -0,0 +1,322 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents import version_list_params, version_update_params
+from ...types.agents.version_list_response import VersionListResponse
+from ...types.agents.version_update_response import VersionUpdateResponse
+
+__all__ = ["VersionsResource", "AsyncVersionsResource"]
+
+
+class VersionsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> VersionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return VersionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return VersionsResourceWithStreamingResponse(self)
+
+ def update(
+ self,
+ path_uuid: str,
+ *,
+ body_uuid: str | Omit = omit,
+ version_hash: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VersionUpdateResponse:
+ """
+ To update to a specific agent version, send a PUT request to
+ `/v2/gen-ai/agents/{uuid}/versions`.
+
+ Args:
+ body_uuid: Agent unique identifier
+
+ version_hash: Unique identifier
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/agents/{path_uuid}/versions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/versions",
+ body=maybe_transform(
+ {
+ "body_uuid": body_uuid,
+ "version_hash": version_hash,
+ },
+ version_update_params.VersionUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VersionUpdateResponse,
+ )
+
+ def list(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VersionListResponse:
+ """
+ To list all agent versions, send a GET request to
+ `/v2/gen-ai/agents/{uuid}/versions`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/agents/{uuid}/versions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/versions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ version_list_params.VersionListParams,
+ ),
+ ),
+ cast_to=VersionListResponse,
+ )
+
+
+class AsyncVersionsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncVersionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncVersionsResourceWithStreamingResponse(self)
+
+ async def update(
+ self,
+ path_uuid: str,
+ *,
+ body_uuid: str | Omit = omit,
+ version_hash: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VersionUpdateResponse:
+ """
+ To update to a specific agent version, send a PUT request to
+ `/v2/gen-ai/agents/{uuid}/versions`.
+
+ Args:
+ body_uuid: Agent unique identifier
+
+ version_hash: Unique identifier
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/agents/{path_uuid}/versions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/versions",
+ body=await async_maybe_transform(
+ {
+ "body_uuid": body_uuid,
+ "version_hash": version_hash,
+ },
+ version_update_params.VersionUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VersionUpdateResponse,
+ )
+
+ async def list(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VersionListResponse:
+ """
+ To list all agent versions, send a GET request to
+ `/v2/gen-ai/agents/{uuid}/versions`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/agents/{uuid}/versions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/versions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ version_list_params.VersionListParams,
+ ),
+ ),
+ cast_to=VersionListResponse,
+ )
+
+
+class VersionsResourceWithRawResponse:
+ def __init__(self, versions: VersionsResource) -> None:
+ self._versions = versions
+
+ self.update = to_raw_response_wrapper(
+ versions.update,
+ )
+ self.list = to_raw_response_wrapper(
+ versions.list,
+ )
+
+
+class AsyncVersionsResourceWithRawResponse:
+ def __init__(self, versions: AsyncVersionsResource) -> None:
+ self._versions = versions
+
+ self.update = async_to_raw_response_wrapper(
+ versions.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ versions.list,
+ )
+
+
+class VersionsResourceWithStreamingResponse:
+ def __init__(self, versions: VersionsResource) -> None:
+ self._versions = versions
+
+ self.update = to_streamed_response_wrapper(
+ versions.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ versions.list,
+ )
+
+
+class AsyncVersionsResourceWithStreamingResponse:
+ def __init__(self, versions: AsyncVersionsResource) -> None:
+ self._versions = versions
+
+ self.update = async_to_streamed_response_wrapper(
+ versions.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ versions.list,
+ )
diff --git a/src/gradient/resources/apps/__init__.py b/src/gradient/resources/apps/__init__.py
new file mode 100644
index 00000000..3033a599
--- /dev/null
+++ b/src/gradient/resources/apps/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .apps import (
+ AppsResource,
+ AsyncAppsResource,
+ AppsResourceWithRawResponse,
+ AsyncAppsResourceWithRawResponse,
+ AppsResourceWithStreamingResponse,
+ AsyncAppsResourceWithStreamingResponse,
+)
+from .job_invocations import (
+ JobInvocationsResource,
+ AsyncJobInvocationsResource,
+ JobInvocationsResourceWithRawResponse,
+ AsyncJobInvocationsResourceWithRawResponse,
+ JobInvocationsResourceWithStreamingResponse,
+ AsyncJobInvocationsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "JobInvocationsResource",
+ "AsyncJobInvocationsResource",
+ "JobInvocationsResourceWithRawResponse",
+ "AsyncJobInvocationsResourceWithRawResponse",
+ "JobInvocationsResourceWithStreamingResponse",
+ "AsyncJobInvocationsResourceWithStreamingResponse",
+ "AppsResource",
+ "AsyncAppsResource",
+ "AppsResourceWithRawResponse",
+ "AsyncAppsResourceWithRawResponse",
+ "AppsResourceWithStreamingResponse",
+ "AsyncAppsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/apps/apps.py b/src/gradient/resources/apps/apps.py
new file mode 100644
index 00000000..1caa9ee7
--- /dev/null
+++ b/src/gradient/resources/apps/apps.py
@@ -0,0 +1,174 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .job_invocations import (
+ JobInvocationsResource,
+ AsyncJobInvocationsResource,
+ JobInvocationsResourceWithRawResponse,
+ AsyncJobInvocationsResourceWithRawResponse,
+ JobInvocationsResourceWithStreamingResponse,
+ AsyncJobInvocationsResourceWithStreamingResponse,
+)
+
+__all__ = ["AppsResource", "AsyncAppsResource"]
+
+
+class AppsResource(SyncAPIResource):
+ @cached_property
+ def job_invocations(self) -> JobInvocationsResource:
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+ """
+ return JobInvocationsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AppsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AppsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AppsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AppsResourceWithStreamingResponse(self)
+
+
+class AsyncAppsResource(AsyncAPIResource):
+ @cached_property
+ def job_invocations(self) -> AsyncJobInvocationsResource:
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+ """
+ return AsyncJobInvocationsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAppsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAppsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAppsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncAppsResourceWithStreamingResponse(self)
+
+
+class AppsResourceWithRawResponse:
+ def __init__(self, apps: AppsResource) -> None:
+ self._apps = apps
+
+ @cached_property
+ def job_invocations(self) -> JobInvocationsResourceWithRawResponse:
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+ """
+ return JobInvocationsResourceWithRawResponse(self._apps.job_invocations)
+
+
+class AsyncAppsResourceWithRawResponse:
+ def __init__(self, apps: AsyncAppsResource) -> None:
+ self._apps = apps
+
+ @cached_property
+ def job_invocations(self) -> AsyncJobInvocationsResourceWithRawResponse:
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/)).
+ """
+ return AsyncJobInvocationsResourceWithRawResponse(self._apps.job_invocations)
+
+
+class AppsResourceWithStreamingResponse:
+ def __init__(self, apps: AppsResource) -> None:
+ self._apps = apps
+
+ @cached_property
+ def job_invocations(self) -> JobInvocationsResourceWithStreamingResponse:
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+ """
+ return JobInvocationsResourceWithStreamingResponse(self._apps.job_invocations)
+
+
+class AsyncAppsResourceWithStreamingResponse:
+ def __init__(self, apps: AsyncAppsResource) -> None:
+ self._apps = apps
+
+ @cached_property
+ def job_invocations(self) -> AsyncJobInvocationsResourceWithStreamingResponse:
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+ """
+ return AsyncJobInvocationsResourceWithStreamingResponse(self._apps.job_invocations)
diff --git a/src/gradient/resources/apps/job_invocations.py b/src/gradient/resources/apps/job_invocations.py
new file mode 100644
index 00000000..02bc5a95
--- /dev/null
+++ b/src/gradient/resources/apps/job_invocations.py
@@ -0,0 +1,217 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...types.apps import job_invocation_cancel_params
+from ..._base_client import make_request_options
+from ...types.apps.job_invocation_cancel_response import JobInvocationCancelResponse
+
+__all__ = ["JobInvocationsResource", "AsyncJobInvocationsResource"]
+
+
+class JobInvocationsResource(SyncAPIResource):
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+ """
+
+ @cached_property
+ def with_raw_response(self) -> JobInvocationsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return JobInvocationsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> JobInvocationsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return JobInvocationsResourceWithStreamingResponse(self)
+
+ def cancel(
+ self,
+ job_invocation_id: str,
+ *,
+ app_id: str,
+ job_name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> JobInvocationCancelResponse:
+ """
+ Cancel a specific job invocation for an app.
+
+ Args:
+ job_name: The name of the job whose invocation should be canceled.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not app_id:
+ raise ValueError(f"Expected a non-empty value for `app_id` but received {app_id!r}")
+ if not job_invocation_id:
+ raise ValueError(f"Expected a non-empty value for `job_invocation_id` but received {job_invocation_id!r}")
+ return self._post(
+ f"/v2/apps/{app_id}/job-invocations/{job_invocation_id}/cancel"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/apps/{app_id}/job-invocations/{job_invocation_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"job_name": job_name}, job_invocation_cancel_params.JobInvocationCancelParams),
+ ),
+ cast_to=JobInvocationCancelResponse,
+ )
+
+
+class AsyncJobInvocationsResource(AsyncAPIResource):
+ """
+ App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows
+ developers to publish code directly to DigitalOcean servers without worrying about the
+ underlying infrastructure.
+
+ Most API operations are centered around a few core object types. Following are the
+ definitions of these types. These definitions will be omitted from the operation-specific
+ documentation.
+
+ For documentation on app specifications (`AppSpec` objects), please refer to the
+ [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncJobInvocationsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncJobInvocationsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncJobInvocationsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncJobInvocationsResourceWithStreamingResponse(self)
+
+ async def cancel(
+ self,
+ job_invocation_id: str,
+ *,
+ app_id: str,
+ job_name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> JobInvocationCancelResponse:
+ """
+ Cancel a specific job invocation for an app.
+
+ Args:
+ job_name: The name of the job whose invocation should be canceled.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not app_id:
+ raise ValueError(f"Expected a non-empty value for `app_id` but received {app_id!r}")
+ if not job_invocation_id:
+ raise ValueError(f"Expected a non-empty value for `job_invocation_id` but received {job_invocation_id!r}")
+ return await self._post(
+ f"/v2/apps/{app_id}/job-invocations/{job_invocation_id}/cancel"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/apps/{app_id}/job-invocations/{job_invocation_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"job_name": job_name}, job_invocation_cancel_params.JobInvocationCancelParams
+ ),
+ ),
+ cast_to=JobInvocationCancelResponse,
+ )
+
+
+class JobInvocationsResourceWithRawResponse:
+ def __init__(self, job_invocations: JobInvocationsResource) -> None:
+ self._job_invocations = job_invocations
+
+ self.cancel = to_raw_response_wrapper(
+ job_invocations.cancel,
+ )
+
+
+class AsyncJobInvocationsResourceWithRawResponse:
+ def __init__(self, job_invocations: AsyncJobInvocationsResource) -> None:
+ self._job_invocations = job_invocations
+
+ self.cancel = async_to_raw_response_wrapper(
+ job_invocations.cancel,
+ )
+
+
+class JobInvocationsResourceWithStreamingResponse:
+ def __init__(self, job_invocations: JobInvocationsResource) -> None:
+ self._job_invocations = job_invocations
+
+ self.cancel = to_streamed_response_wrapper(
+ job_invocations.cancel,
+ )
+
+
+class AsyncJobInvocationsResourceWithStreamingResponse:
+ def __init__(self, job_invocations: AsyncJobInvocationsResource) -> None:
+ self._job_invocations = job_invocations
+
+ self.cancel = async_to_streamed_response_wrapper(
+ job_invocations.cancel,
+ )
diff --git a/src/gradient/resources/billing.py b/src/gradient/resources/billing.py
new file mode 100644
index 00000000..8261cfa9
--- /dev/null
+++ b/src/gradient/resources/billing.py
@@ -0,0 +1,284 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from datetime import date
+
+import httpx
+
+from ..types import billing_list_insights_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.billing_list_insights_response import BillingListInsightsResponse
+
+__all__ = ["BillingResource", "AsyncBillingResource"]
+
+
+class BillingResource(SyncAPIResource):
+ """
+ The billing endpoints allow you to retrieve your account balance, invoices,
+ billing history, and insights.
+
+ **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+ retrieve the balance information for the requested customer account.
+
+ **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+ are generated on the first of each month for every DigitalOcean
+ customer. An invoice preview is generated daily, which can be accessed
+ with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+ invoices, you will generally send requests to the invoices endpoint at
+ `/v2/customers/my/invoices`.
+
+ **Billing History:** Billing history is a record of billing events for your account.
+ For example, entries may include events like payments made, invoices
+ issued, or credits granted. To interact with billing history, you
+ will generally send requests to the billing history endpoint at
+ `/v2/customers/my/billing_history`.
+
+ **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+ including total amount, region, SKU, and description for a specified date range.
+ It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+ a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+ `/v2/billing/{account_urn}/insights/{start_date}/{end_date}` where account_urn is the URN of the customer
+ account, can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+ start_date and end_date must be in YYYY-MM-DD format.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> BillingResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return BillingResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> BillingResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return BillingResourceWithStreamingResponse(self)
+
+ def list_insights(
+ self,
+ end_date: Union[str, date],
+ *,
+ account_urn: str,
+ start_date: Union[str, date],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> BillingListInsightsResponse:
+ """
+ This endpoint returns day-over-day changes in billing resource usage based on
+ nightly invoice items, including total amount, region, SKU, and description for
+ a specified date range. It is important to note that the daily resource usage
+ may not reflect month-end billing totals when totaled for a given month as
+ nightly invoice item estimates do not necessarily encompass all invoicing
+ factors for the entire month.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not account_urn:
+ raise ValueError(f"Expected a non-empty value for `account_urn` but received {account_urn!r}")
+ if not start_date:
+ raise ValueError(f"Expected a non-empty value for `start_date` but received {start_date!r}")
+ if not end_date:
+ raise ValueError(f"Expected a non-empty value for `end_date` but received {end_date!r}")
+ return self._get(
+ f"/v2/billing/{account_urn}/insights/{start_date}/{end_date}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/billing/{account_urn}/insights/{start_date}/{end_date}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ billing_list_insights_params.BillingListInsightsParams,
+ ),
+ ),
+ cast_to=BillingListInsightsResponse,
+ )
+
+
+class AsyncBillingResource(AsyncAPIResource):
+ """
+ The billing endpoints allow you to retrieve your account balance, invoices,
+ billing history, and insights.
+
+ **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+ retrieve the balance information for the requested customer account.
+
+ **Invoices:** [Invoices](https://docs.digitalocean.com/platform/billing/invoices/)
+ are generated on the first of each month for every DigitalOcean
+ customer. An invoice preview is generated daily, which can be accessed
+ with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+ invoices, you will generally send requests to the invoices endpoint at
+ `/v2/customers/my/invoices`.
+
+ **Billing History:** Billing history is a record of billing events for your account.
+ For example, entries may include events like payments made, invoices
+ issued, or credits granted. To interact with billing history, you
+ will generally send requests to the billing history endpoint at
+ `/v2/customers/my/billing_history`.
+
+ **Billing Insights:** Day-over-day changes in billing resource usage based on nightly invoice items,
+ including total amount, region, SKU, and description for a specified date range.
+ It is important to note that the daily resource usage may not reflect month-end billing totals when totaled for
+ a given month as nightly invoice items do not necessarily encompass all invoicing factors for the entire month.
+ `/v2/billing/{account_urn}/insights/{start_date}/{end_date}` where account_urn is the URN of the customer
+ account, can be a team (do:team:uuid) or an organization (do:teamgroup:uuid). The date range specified by
+ start_date and end_date must be in YYYY-MM-DD format.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncBillingResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncBillingResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncBillingResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncBillingResourceWithStreamingResponse(self)
+
+ async def list_insights(
+ self,
+ end_date: Union[str, date],
+ *,
+ account_urn: str,
+ start_date: Union[str, date],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> BillingListInsightsResponse:
+ """
+ This endpoint returns day-over-day changes in billing resource usage based on
+ nightly invoice items, including total amount, region, SKU, and description for
+ a specified date range. It is important to note that the daily resource usage
+ may not reflect month-end billing totals when totaled for a given month as
+ nightly invoice item estimates do not necessarily encompass all invoicing
+ factors for the entire month.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not account_urn:
+ raise ValueError(f"Expected a non-empty value for `account_urn` but received {account_urn!r}")
+ if not start_date:
+ raise ValueError(f"Expected a non-empty value for `start_date` but received {start_date!r}")
+ if not end_date:
+ raise ValueError(f"Expected a non-empty value for `end_date` but received {end_date!r}")
+ return await self._get(
+ f"/v2/billing/{account_urn}/insights/{start_date}/{end_date}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/billing/{account_urn}/insights/{start_date}/{end_date}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ billing_list_insights_params.BillingListInsightsParams,
+ ),
+ ),
+ cast_to=BillingListInsightsResponse,
+ )
+
+
+class BillingResourceWithRawResponse:
+ def __init__(self, billing: BillingResource) -> None:
+ self._billing = billing
+
+ self.list_insights = to_raw_response_wrapper(
+ billing.list_insights,
+ )
+
+
+class AsyncBillingResourceWithRawResponse:
+ def __init__(self, billing: AsyncBillingResource) -> None:
+ self._billing = billing
+
+ self.list_insights = async_to_raw_response_wrapper(
+ billing.list_insights,
+ )
+
+
+class BillingResourceWithStreamingResponse:
+ def __init__(self, billing: BillingResource) -> None:
+ self._billing = billing
+
+ self.list_insights = to_streamed_response_wrapper(
+ billing.list_insights,
+ )
+
+
+class AsyncBillingResourceWithStreamingResponse:
+ def __init__(self, billing: AsyncBillingResource) -> None:
+ self._billing = billing
+
+ self.list_insights = async_to_streamed_response_wrapper(
+ billing.list_insights,
+ )
diff --git a/src/gradient/resources/chat/__init__.py b/src/gradient/resources/chat/__init__.py
new file mode 100644
index 00000000..ec960eb4
--- /dev/null
+++ b/src/gradient/resources/chat/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chat import (
+ ChatResource,
+ AsyncChatResource,
+ ChatResourceWithRawResponse,
+ AsyncChatResourceWithRawResponse,
+ ChatResourceWithStreamingResponse,
+ AsyncChatResourceWithStreamingResponse,
+)
+from .completions import (
+ CompletionsResource,
+ AsyncCompletionsResource,
+ CompletionsResourceWithRawResponse,
+ AsyncCompletionsResourceWithRawResponse,
+ CompletionsResourceWithStreamingResponse,
+ AsyncCompletionsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "CompletionsResource",
+ "AsyncCompletionsResource",
+ "CompletionsResourceWithRawResponse",
+ "AsyncCompletionsResourceWithRawResponse",
+ "CompletionsResourceWithStreamingResponse",
+ "AsyncCompletionsResourceWithStreamingResponse",
+ "ChatResource",
+ "AsyncChatResource",
+ "ChatResourceWithRawResponse",
+ "AsyncChatResourceWithRawResponse",
+ "ChatResourceWithStreamingResponse",
+ "AsyncChatResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/chat/chat.py b/src/gradient/resources/chat/chat.py
new file mode 100644
index 00000000..1175e6db
--- /dev/null
+++ b/src/gradient/resources/chat/chat.py
@@ -0,0 +1,120 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .completions import (
+ CompletionsResource,
+ AsyncCompletionsResource,
+ CompletionsResourceWithRawResponse,
+ AsyncCompletionsResourceWithRawResponse,
+ CompletionsResourceWithStreamingResponse,
+ AsyncCompletionsResourceWithStreamingResponse,
+)
+
+__all__ = ["ChatResource", "AsyncChatResource"]
+
+
+class ChatResource(SyncAPIResource):
+ @cached_property
+ def completions(self) -> CompletionsResource:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return CompletionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ChatResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ChatResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ChatResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ChatResourceWithStreamingResponse(self)
+
+
+class AsyncChatResource(AsyncAPIResource):
+ @cached_property
+ def completions(self) -> AsyncCompletionsResource:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return AsyncCompletionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncChatResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncChatResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncChatResourceWithStreamingResponse(self)
+
+
+class ChatResourceWithRawResponse:
+ def __init__(self, chat: ChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> CompletionsResourceWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return CompletionsResourceWithRawResponse(self._chat.completions)
+
+
+class AsyncChatResourceWithRawResponse:
+ def __init__(self, chat: AsyncChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> AsyncCompletionsResourceWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return AsyncCompletionsResourceWithRawResponse(self._chat.completions)
+
+
+class ChatResourceWithStreamingResponse:
+ def __init__(self, chat: ChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> CompletionsResourceWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return CompletionsResourceWithStreamingResponse(self._chat.completions)
+
+
+class AsyncChatResourceWithStreamingResponse:
+ def __init__(self, chat: AsyncChatResource) -> None:
+ self._chat = chat
+
+ @cached_property
+ def completions(self) -> AsyncCompletionsResourceWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+ return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions)
diff --git a/src/gradient/resources/chat/completions.py b/src/gradient/resources/chat/completions.py
new file mode 100644
index 00000000..2052db35
--- /dev/null
+++ b/src/gradient/resources/chat/completions.py
@@ -0,0 +1,1078 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._streaming import Stream, AsyncStream
+from ...types.chat import completion_create_params
+from ..._base_client import make_request_options
+from ...types.shared.chat_completion_chunk import ChatCompletionChunk
+from ...types.chat.completion_create_response import CompletionCreateResponse
+
+__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
+
+
+class CompletionsResource(SyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> CompletionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return CompletionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return CompletionsResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompletionCreateResponse:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ stream: Literal[True],
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Stream[ChatCompletionChunk]:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ stream: bool,
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["messages", "model"], ["messages", "model", "stream"])
+ def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
+        # This method requires a model_access_key to be set via client argument or environment variable
+ if not self._client.model_access_key:
+ raise TypeError(
+ "Could not resolve authentication method. Expected model_access_key to be set for chat completions."
+ )
+ headers = extra_headers or {}
+ headers = {
+ "Authorization": f"Bearer {self._client.model_access_key}",
+ **headers,
+ }
+
+ return self._post(
+ "/chat/completions"
+ if self._client._base_url_overridden
+ else f"{self._client.inference_endpoint}/v1/chat/completions",
+ body=maybe_transform(
+ {
+ "messages": messages,
+ "model": model,
+ "frequency_penalty": frequency_penalty,
+ "logit_bias": logit_bias,
+ "logprobs": logprobs,
+ "max_completion_tokens": max_completion_tokens,
+ "max_tokens": max_tokens,
+ "metadata": metadata,
+ "n": n,
+ "presence_penalty": presence_penalty,
+ "reasoning_effort": reasoning_effort,
+ "stop": stop,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_logprobs": top_logprobs,
+ "top_p": top_p,
+ "user": user,
+ },
+ (
+ completion_create_params.CompletionCreateParamsStreaming
+ if stream
+ else completion_create_params.CompletionCreateParamsNonStreaming
+ ),
+ ),
+ options=make_request_options(
+ extra_headers=headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ ),
+ cast_to=CompletionCreateResponse,
+ stream=stream or False,
+ stream_cls=Stream[ChatCompletionChunk],
+ )
+
+
+class AsyncCompletionsResource(AsyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncCompletionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncCompletionsResourceWithStreamingResponse(self)
+
+ @overload
+ async def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompletionCreateResponse:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ stream: Literal[True],
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncStream[ChatCompletionChunk]:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ stream: bool,
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
+ """
+ Creates a model response for the given chat conversation.
+
+ Args:
+ messages: A list of messages comprising the conversation so far.
+
+ model: Model ID used to generate the response.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ existing frequency in the text so far, decreasing the model's likelihood to
+ repeat the same line verbatim.
+
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
+ returns the log probabilities of each output token returned in the `content` of
+ `message`.
+
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ n: How many chat completion choices to generate for each input message. Note that
+ you will be charged based on the number of generated tokens across all of the
+ choices. Keep `n` as `1` to minimize costs.
+
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ whether they appear in the text so far, increasing the model's likelihood to
+ talk about new topics.
+
+ reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+ can result in faster responses and fewer tokens used on reasoning in a response.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["messages", "model"], ["messages", "model", "stream"])
+ async def create(
+ self,
+ *,
+ messages: Iterable[completion_create_params.Message],
+ model: str,
+ frequency_penalty: Optional[float] | Omit = omit,
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
+ logprobs: Optional[bool] | Omit = omit,
+ max_completion_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: completion_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[completion_create_params.Tool] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
+        # This method requires a model_access_key to be set via client argument or environment variable
+ if not hasattr(self._client, "model_access_key") or not self._client.model_access_key:
+ raise TypeError(
+ "Could not resolve authentication method. Expected model_access_key to be set for chat completions."
+ )
+ headers = extra_headers or {}
+ headers = {
+ "Authorization": f"Bearer {self._client.model_access_key}",
+ **headers,
+ }
+
+ return await self._post(
+ "/chat/completions"
+ if self._client._base_url_overridden
+ else f"{self._client.inference_endpoint}/v1/chat/completions",
+ body=await async_maybe_transform(
+ {
+ "messages": messages,
+ "model": model,
+ "frequency_penalty": frequency_penalty,
+ "logit_bias": logit_bias,
+ "logprobs": logprobs,
+ "max_completion_tokens": max_completion_tokens,
+ "max_tokens": max_tokens,
+ "metadata": metadata,
+ "n": n,
+ "presence_penalty": presence_penalty,
+ "reasoning_effort": reasoning_effort,
+ "stop": stop,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_logprobs": top_logprobs,
+ "top_p": top_p,
+ "user": user,
+ },
+ (
+ completion_create_params.CompletionCreateParamsStreaming
+ if stream
+ else completion_create_params.CompletionCreateParamsNonStreaming
+ ),
+ ),
+ options=make_request_options(
+ extra_headers=headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ ),
+ cast_to=CompletionCreateResponse,
+ stream=stream or False,
+ stream_cls=AsyncStream[ChatCompletionChunk],
+ )
+
+
+class CompletionsResourceWithRawResponse:
+ def __init__(self, completions: CompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = to_raw_response_wrapper(
+ completions.create,
+ )
+
+
+class AsyncCompletionsResourceWithRawResponse:
+ def __init__(self, completions: AsyncCompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = async_to_raw_response_wrapper(
+ completions.create,
+ )
+
+
+class CompletionsResourceWithStreamingResponse:
+ def __init__(self, completions: CompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = to_streamed_response_wrapper(
+ completions.create,
+ )
+
+
+class AsyncCompletionsResourceWithStreamingResponse:
+ def __init__(self, completions: AsyncCompletionsResource) -> None:
+ self._completions = completions
+
+ self.create = async_to_streamed_response_wrapper(
+ completions.create,
+ )
diff --git a/src/gradient/resources/databases/__init__.py b/src/gradient/resources/databases/__init__.py
new file mode 100644
index 00000000..40c62ed8
--- /dev/null
+++ b/src/gradient/resources/databases/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .databases import (
+ DatabasesResource,
+ AsyncDatabasesResource,
+ DatabasesResourceWithRawResponse,
+ AsyncDatabasesResourceWithRawResponse,
+ DatabasesResourceWithStreamingResponse,
+ AsyncDatabasesResourceWithStreamingResponse,
+)
+from .schema_registry import (
+ SchemaRegistryResource,
+ AsyncSchemaRegistryResource,
+ SchemaRegistryResourceWithRawResponse,
+ AsyncSchemaRegistryResourceWithRawResponse,
+ SchemaRegistryResourceWithStreamingResponse,
+ AsyncSchemaRegistryResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "SchemaRegistryResource",
+ "AsyncSchemaRegistryResource",
+ "SchemaRegistryResourceWithRawResponse",
+ "AsyncSchemaRegistryResourceWithRawResponse",
+ "SchemaRegistryResourceWithStreamingResponse",
+ "AsyncSchemaRegistryResourceWithStreamingResponse",
+ "DatabasesResource",
+ "AsyncDatabasesResource",
+ "DatabasesResourceWithRawResponse",
+ "AsyncDatabasesResourceWithRawResponse",
+ "DatabasesResourceWithStreamingResponse",
+ "AsyncDatabasesResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/databases/databases.py b/src/gradient/resources/databases/databases.py
new file mode 100644
index 00000000..120ab91f
--- /dev/null
+++ b/src/gradient/resources/databases/databases.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .schema_registry.schema_registry import (
+ SchemaRegistryResource,
+ AsyncSchemaRegistryResource,
+ SchemaRegistryResourceWithRawResponse,
+ AsyncSchemaRegistryResourceWithRawResponse,
+ SchemaRegistryResourceWithStreamingResponse,
+ AsyncSchemaRegistryResourceWithStreamingResponse,
+)
+
+__all__ = ["DatabasesResource", "AsyncDatabasesResource"]
+
+
+class DatabasesResource(SyncAPIResource):
+ @cached_property
+ def schema_registry(self) -> SchemaRegistryResource:
+ return SchemaRegistryResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> DatabasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return DatabasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> DatabasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return DatabasesResourceWithStreamingResponse(self)
+
+
+class AsyncDatabasesResource(AsyncAPIResource):
+ @cached_property
+ def schema_registry(self) -> AsyncSchemaRegistryResource:
+ return AsyncSchemaRegistryResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncDatabasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncDatabasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncDatabasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncDatabasesResourceWithStreamingResponse(self)
+
+
+class DatabasesResourceWithRawResponse:
+ def __init__(self, databases: DatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> SchemaRegistryResourceWithRawResponse:
+ return SchemaRegistryResourceWithRawResponse(self._databases.schema_registry)
+
+
+class AsyncDatabasesResourceWithRawResponse:
+ def __init__(self, databases: AsyncDatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> AsyncSchemaRegistryResourceWithRawResponse:
+ return AsyncSchemaRegistryResourceWithRawResponse(self._databases.schema_registry)
+
+
+class DatabasesResourceWithStreamingResponse:
+ def __init__(self, databases: DatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> SchemaRegistryResourceWithStreamingResponse:
+ return SchemaRegistryResourceWithStreamingResponse(self._databases.schema_registry)
+
+
+class AsyncDatabasesResourceWithStreamingResponse:
+ def __init__(self, databases: AsyncDatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> AsyncSchemaRegistryResourceWithStreamingResponse:
+ return AsyncSchemaRegistryResourceWithStreamingResponse(self._databases.schema_registry)
diff --git a/src/gradient/resources/databases/schema_registry/__init__.py b/src/gradient/resources/databases/schema_registry/__init__.py
new file mode 100644
index 00000000..2015e4d4
--- /dev/null
+++ b/src/gradient/resources/databases/schema_registry/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .config import (
+ ConfigResource,
+ AsyncConfigResource,
+ ConfigResourceWithRawResponse,
+ AsyncConfigResourceWithRawResponse,
+ ConfigResourceWithStreamingResponse,
+ AsyncConfigResourceWithStreamingResponse,
+)
+from .schema_registry import (
+ SchemaRegistryResource,
+ AsyncSchemaRegistryResource,
+ SchemaRegistryResourceWithRawResponse,
+ AsyncSchemaRegistryResourceWithRawResponse,
+ SchemaRegistryResourceWithStreamingResponse,
+ AsyncSchemaRegistryResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "ConfigResource",
+ "AsyncConfigResource",
+ "ConfigResourceWithRawResponse",
+ "AsyncConfigResourceWithRawResponse",
+ "ConfigResourceWithStreamingResponse",
+ "AsyncConfigResourceWithStreamingResponse",
+ "SchemaRegistryResource",
+ "AsyncSchemaRegistryResource",
+ "SchemaRegistryResourceWithRawResponse",
+ "AsyncSchemaRegistryResourceWithRawResponse",
+ "SchemaRegistryResourceWithStreamingResponse",
+ "AsyncSchemaRegistryResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/databases/schema_registry/config.py b/src/gradient/resources/databases/schema_registry/config.py
new file mode 100644
index 00000000..825bd2ae
--- /dev/null
+++ b/src/gradient/resources/databases/schema_registry/config.py
@@ -0,0 +1,558 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ...._types import Body, Query, Headers, NotGiven, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.databases.schema_registry import config_update_params, config_update_subject_params
+from ....types.databases.schema_registry.config_update_response import ConfigUpdateResponse
+from ....types.databases.schema_registry.config_retrieve_response import ConfigRetrieveResponse
+from ....types.databases.schema_registry.config_update_subject_response import ConfigUpdateSubjectResponse
+from ....types.databases.schema_registry.config_retrieve_subject_response import ConfigRetrieveSubjectResponse
+
+__all__ = ["ConfigResource", "AsyncConfigResource"]
+
+
+class ConfigResource(SyncAPIResource):
+ """
+ DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+ simplifies the creation and management of highly available database clusters. Currently, it
+    offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+ [Caching](https://docs.digitalocean.com/products/databases/redis/),
+ [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+ [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+ [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+ [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+ By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+ database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+ and manage other configuration details.
+
+ Database clusters may be deployed in a multi-node, high-availability configuration.
+ If your machine type is above the basic nodes, your node plan is above the smallest option,
+ or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+ The size of individual nodes in a database cluster is represented by a human-readable slug,
+ which is used in some of the following requests. Each slug denotes the node's identifier,
+ CPU count, and amount of RAM, in that order.
+
+ For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+ `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+ """
+
+ @cached_property
+ def with_raw_response(self) -> ConfigResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ConfigResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ConfigResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ConfigResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ database_cluster_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigRetrieveResponse:
+ """
+ To retrieve the Schema Registry configuration for a Kafka cluster, send a GET
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveResponse,
+ )
+
+ def update(
+ self,
+ database_cluster_uuid: str,
+ *,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigUpdateResponse:
+ """
+ To update the Schema Registry configuration for a Kafka cluster, send a PUT
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ body=maybe_transform({"compatibility_level": compatibility_level}, config_update_params.ConfigUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateResponse,
+ )
+
+ def retrieve_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigRetrieveSubjectResponse:
+ """
+ To retrieve the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a GET request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveSubjectResponse,
+ )
+
+ def update_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigUpdateSubjectResponse:
+ """
+ To update the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a PUT request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ body=maybe_transform(
+ {"compatibility_level": compatibility_level}, config_update_subject_params.ConfigUpdateSubjectParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateSubjectResponse,
+ )
+
+
+class AsyncConfigResource(AsyncAPIResource):
+ """
+ DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+ simplifies the creation and management of highly available database clusters. Currently, it
+    offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+ [Caching](https://docs.digitalocean.com/products/databases/redis/),
+ [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+ [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+ [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+ [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+ By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+ database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+ and manage other configuration details.
+
+ Database clusters may be deployed in a multi-node, high-availability configuration.
+ If your machine type is above the basic nodes, your node plan is above the smallest option,
+ or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+ The size of individual nodes in a database cluster is represented by a human-readable slug,
+ which is used in some of the following requests. Each slug denotes the node's identifier,
+ CPU count, and amount of RAM, in that order.
+
+ For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+ `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncConfigResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncConfigResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncConfigResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncConfigResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ database_cluster_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigRetrieveResponse:
+ """
+ To retrieve the Schema Registry configuration for a Kafka cluster, send a GET
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ database_cluster_uuid: str,
+ *,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigUpdateResponse:
+ """
+ To update the Schema Registry configuration for a Kafka cluster, send a PUT
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ body=await async_maybe_transform(
+ {"compatibility_level": compatibility_level}, config_update_params.ConfigUpdateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateResponse,
+ )
+
+ async def retrieve_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigRetrieveSubjectResponse:
+ """
+ To retrieve the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a GET request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return await self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveSubjectResponse,
+ )
+
+ async def update_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ConfigUpdateSubjectResponse:
+ """
+ To update the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a PUT request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return await self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ body=await async_maybe_transform(
+ {"compatibility_level": compatibility_level}, config_update_subject_params.ConfigUpdateSubjectParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateSubjectResponse,
+ )
+
+
+class ConfigResourceWithRawResponse:
+ def __init__(self, config: ConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = to_raw_response_wrapper(
+ config.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = to_raw_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = to_raw_response_wrapper(
+ config.update_subject,
+ )
+
+
+class AsyncConfigResourceWithRawResponse:
+ def __init__(self, config: AsyncConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = async_to_raw_response_wrapper(
+ config.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = async_to_raw_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = async_to_raw_response_wrapper(
+ config.update_subject,
+ )
+
+
+class ConfigResourceWithStreamingResponse:
+ def __init__(self, config: ConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = to_streamed_response_wrapper(
+ config.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = to_streamed_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = to_streamed_response_wrapper(
+ config.update_subject,
+ )
+
+
+class AsyncConfigResourceWithStreamingResponse:
+ def __init__(self, config: AsyncConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ config.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = async_to_streamed_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = async_to_streamed_response_wrapper(
+ config.update_subject,
+ )
diff --git a/src/gradient/resources/databases/schema_registry/schema_registry.py b/src/gradient/resources/databases/schema_registry/schema_registry.py
new file mode 100644
index 00000000..de047c50
--- /dev/null
+++ b/src/gradient/resources/databases/schema_registry/schema_registry.py
@@ -0,0 +1,252 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .config import (
+ ConfigResource,
+ AsyncConfigResource,
+ ConfigResourceWithRawResponse,
+ AsyncConfigResourceWithRawResponse,
+ ConfigResourceWithStreamingResponse,
+ AsyncConfigResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["SchemaRegistryResource", "AsyncSchemaRegistryResource"]
+
+
+class SchemaRegistryResource(SyncAPIResource):
+    """Synchronous schema-registry resource; exposes the ``config`` sub-resource."""
+
+    @cached_property
+    def config(self) -> ConfigResource:
+        """
+        DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+        simplifies the creation and management of highly available database clusters. Currently, it
+        offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+        [Caching](https://docs.digitalocean.com/products/databases/redis/),
+        [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+        [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+        [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+        [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+        By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+        database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+        and manage other configuration details.
+
+        Database clusters may be deployed in a multi-node, high-availability configuration.
+        If your machine type is above the basic nodes, your node plan is above the smallest option,
+        or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+        The size of individual nodes in a database cluster is represented by a human-readable slug,
+        which is used in some of the following requests. Each slug denotes the node's identifier,
+        CPU count, and amount of RAM, in that order.
+
+        For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+        `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+
+        NOTE(review): the text above describes the managed-database service in
+        general, not schema-registry configuration specifically — presumably
+        copied from the spec's databases section; confirm against the OpenAPI spec.
+        """
+        return ConfigResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> SchemaRegistryResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return SchemaRegistryResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> SchemaRegistryResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return SchemaRegistryResourceWithStreamingResponse(self)
+
+
+class AsyncSchemaRegistryResource(AsyncAPIResource):
+    """Asynchronous schema-registry resource; exposes the ``config`` sub-resource."""
+
+    @cached_property
+    def config(self) -> AsyncConfigResource:
+        """
+        DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+        simplifies the creation and management of highly available database clusters. Currently, it
+        offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+        [Caching](https://docs.digitalocean.com/products/databases/redis/),
+        [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+        [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+        [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+        [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+        By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+        database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+        and manage other configuration details.
+
+        Database clusters may be deployed in a multi-node, high-availability configuration.
+        If your machine type is above the basic nodes, your node plan is above the smallest option,
+        or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+        The size of individual nodes in a database cluster is represented by a human-readable slug,
+        which is used in some of the following requests. Each slug denotes the node's identifier,
+        CPU count, and amount of RAM, in that order.
+
+        For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+        `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+
+        NOTE(review): the text above describes the managed-database service in
+        general, not schema-registry configuration specifically — presumably
+        copied from the spec's databases section; confirm against the OpenAPI spec.
+        """
+        return AsyncConfigResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncSchemaRegistryResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncSchemaRegistryResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncSchemaRegistryResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncSchemaRegistryResourceWithStreamingResponse(self)
+
+
+class SchemaRegistryResourceWithRawResponse:
+    """Raw-response view of :class:`SchemaRegistryResource`.
+
+    Sub-resources are returned wrapped so that their methods yield the raw
+    HTTP response object instead of the parsed content.
+    """
+
+    def __init__(self, schema_registry: SchemaRegistryResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._schema_registry = schema_registry
+
+    @cached_property
+    def config(self) -> ConfigResourceWithRawResponse:
+        """
+        DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+        simplifies the creation and management of highly available database clusters. Currently, it
+        offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+        [Caching](https://docs.digitalocean.com/products/databases/redis/),
+        [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+        [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+        [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+        [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+        By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+        database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+        and manage other configuration details.
+
+        Database clusters may be deployed in a multi-node, high-availability configuration.
+        If your machine type is above the basic nodes, your node plan is above the smallest option,
+        or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+        The size of individual nodes in a database cluster is represented by a human-readable slug,
+        which is used in some of the following requests. Each slug denotes the node's identifier,
+        CPU count, and amount of RAM, in that order.
+
+        For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+        `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+        """
+        return ConfigResourceWithRawResponse(self._schema_registry.config)
+
+
+class AsyncSchemaRegistryResourceWithRawResponse:
+    """Raw-response view of :class:`AsyncSchemaRegistryResource`.
+
+    Sub-resources are returned wrapped so that their methods yield the raw
+    HTTP response object instead of the parsed content.
+    """
+
+    def __init__(self, schema_registry: AsyncSchemaRegistryResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._schema_registry = schema_registry
+
+    @cached_property
+    def config(self) -> AsyncConfigResourceWithRawResponse:
+        """
+        DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+        simplifies the creation and management of highly available database clusters. Currently, it
+        offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+        [Caching](https://docs.digitalocean.com/products/databases/redis/),
+        [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+        [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+        [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+        [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+        By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+        database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+        and manage other configuration details.
+
+        Database clusters may be deployed in a multi-node, high-availability configuration.
+        If your machine type is above the basic nodes, your node plan is above the smallest option,
+        or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+        The size of individual nodes in a database cluster is represented by a human-readable slug,
+        which is used in some of the following requests. Each slug denotes the node's identifier,
+        CPU count, and amount of RAM, in that order.
+
+        For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+        `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+        """
+        return AsyncConfigResourceWithRawResponse(self._schema_registry.config)
+
+
+class SchemaRegistryResourceWithStreamingResponse:
+    """Streaming-response view of :class:`SchemaRegistryResource`.
+
+    Sub-resources are returned wrapped so that their methods do not eagerly
+    read the response body.
+    """
+
+    def __init__(self, schema_registry: SchemaRegistryResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._schema_registry = schema_registry
+
+    @cached_property
+    def config(self) -> ConfigResourceWithStreamingResponse:
+        """
+        DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+        simplifies the creation and management of highly available database clusters. Currently, it
+        offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+        [Caching](https://docs.digitalocean.com/products/databases/redis/),
+        [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+        [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+        [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+        [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+        By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+        database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+        and manage other configuration details.
+
+        Database clusters may be deployed in a multi-node, high-availability configuration.
+        If your machine type is above the basic nodes, your node plan is above the smallest option,
+        or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+        The size of individual nodes in a database cluster is represented by a human-readable slug,
+        which is used in some of the following requests. Each slug denotes the node's identifier,
+        CPU count, and amount of RAM, in that order.
+
+        For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+        `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+        """
+        return ConfigResourceWithStreamingResponse(self._schema_registry.config)
+
+
+class AsyncSchemaRegistryResourceWithStreamingResponse:
+    """Streaming-response view of :class:`AsyncSchemaRegistryResource`.
+
+    Sub-resources are returned wrapped so that their methods do not eagerly
+    read the response body.
+    """
+
+    def __init__(self, schema_registry: AsyncSchemaRegistryResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._schema_registry = schema_registry
+
+    @cached_property
+    def config(self) -> AsyncConfigResourceWithStreamingResponse:
+        """
+        DigitalOcean's [managed database service](https://docs.digitalocean.com/products/databases)
+        simplifies the creation and management of highly available database clusters. Currently, it
+        offers support for [PostgreSQL](https://docs.digitalocean.com/products/databases/postgresql/),
+        [Caching](https://docs.digitalocean.com/products/databases/redis/),
+        [Valkey](https://docs.digitalocean.com/products/databases/valkey/),
+        [MySQL](https://docs.digitalocean.com/products/databases/mysql/),
+        [MongoDB](https://docs.digitalocean.com/products/databases/mongodb/), and
+        [OpenSearch](https://docs.digitalocean.com/products/databases/opensearch/).
+
+        By sending requests to the `/v2/databases` endpoint, you can list, create, or delete
+        database clusters as well as scale the size of a cluster, add or remove read-only replicas,
+        and manage other configuration details.
+
+        Database clusters may be deployed in a multi-node, high-availability configuration.
+        If your machine type is above the basic nodes, your node plan is above the smallest option,
+        or you are running MongoDB, you may additionally include up to two standby nodes in your cluster.
+
+        The size of individual nodes in a database cluster is represented by a human-readable slug,
+        which is used in some of the following requests. Each slug denotes the node's identifier,
+        CPU count, and amount of RAM, in that order.
+
+        For a list of currently available database slugs and options, use the `/v2/databases/options` endpoint or use the
+        `doctl databases options` [command](https://docs.digitalocean.com/reference/doctl/reference/databases/options).
+        """
+        return AsyncConfigResourceWithStreamingResponse(self._schema_registry.config)
diff --git a/src/gradient/resources/gpu_droplets/__init__.py b/src/gradient/resources/gpu_droplets/__init__.py
new file mode 100644
index 00000000..064a36ce
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/__init__.py
@@ -0,0 +1,187 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .sizes import (
+ SizesResource,
+ AsyncSizesResource,
+ SizesResourceWithRawResponse,
+ AsyncSizesResourceWithRawResponse,
+ SizesResourceWithStreamingResponse,
+ AsyncSizesResourceWithStreamingResponse,
+)
+from .images import (
+ ImagesResource,
+ AsyncImagesResource,
+ ImagesResourceWithRawResponse,
+ AsyncImagesResourceWithRawResponse,
+ ImagesResourceWithStreamingResponse,
+ AsyncImagesResourceWithStreamingResponse,
+)
+from .account import (
+ AccountResource,
+ AsyncAccountResource,
+ AccountResourceWithRawResponse,
+ AsyncAccountResourceWithRawResponse,
+ AccountResourceWithStreamingResponse,
+ AsyncAccountResourceWithStreamingResponse,
+)
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+from .backups import (
+ BackupsResource,
+ AsyncBackupsResource,
+ BackupsResourceWithRawResponse,
+ AsyncBackupsResourceWithRawResponse,
+ BackupsResourceWithStreamingResponse,
+ AsyncBackupsResourceWithStreamingResponse,
+)
+from .volumes import (
+ VolumesResource,
+ AsyncVolumesResource,
+ VolumesResourceWithRawResponse,
+ AsyncVolumesResourceWithRawResponse,
+ VolumesResourceWithStreamingResponse,
+ AsyncVolumesResourceWithStreamingResponse,
+)
+from .autoscale import (
+ AutoscaleResource,
+ AsyncAutoscaleResource,
+ AutoscaleResourceWithRawResponse,
+ AsyncAutoscaleResourceWithRawResponse,
+ AutoscaleResourceWithStreamingResponse,
+ AsyncAutoscaleResourceWithStreamingResponse,
+)
+from .firewalls import (
+ FirewallsResource,
+ AsyncFirewallsResource,
+ FirewallsResourceWithRawResponse,
+ AsyncFirewallsResourceWithRawResponse,
+ FirewallsResourceWithStreamingResponse,
+ AsyncFirewallsResourceWithStreamingResponse,
+)
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+from .floating_ips import (
+ FloatingIPsResource,
+ AsyncFloatingIPsResource,
+ FloatingIPsResourceWithRawResponse,
+ AsyncFloatingIPsResourceWithRawResponse,
+ FloatingIPsResourceWithStreamingResponse,
+ AsyncFloatingIPsResourceWithStreamingResponse,
+)
+from .gpu_droplets import (
+ GPUDropletsResource,
+ AsyncGPUDropletsResource,
+ GPUDropletsResourceWithRawResponse,
+ AsyncGPUDropletsResourceWithRawResponse,
+ GPUDropletsResourceWithStreamingResponse,
+ AsyncGPUDropletsResourceWithStreamingResponse,
+)
+from .load_balancers import (
+ LoadBalancersResource,
+ AsyncLoadBalancersResource,
+ LoadBalancersResourceWithRawResponse,
+ AsyncLoadBalancersResourceWithRawResponse,
+ LoadBalancersResourceWithStreamingResponse,
+ AsyncLoadBalancersResourceWithStreamingResponse,
+)
+from .destroy_with_associated_resources import (
+ DestroyWithAssociatedResourcesResource,
+ AsyncDestroyWithAssociatedResourcesResource,
+ DestroyWithAssociatedResourcesResourceWithRawResponse,
+ AsyncDestroyWithAssociatedResourcesResourceWithRawResponse,
+ DestroyWithAssociatedResourcesResourceWithStreamingResponse,
+ AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "BackupsResource",
+ "AsyncBackupsResource",
+ "BackupsResourceWithRawResponse",
+ "AsyncBackupsResourceWithRawResponse",
+ "BackupsResourceWithStreamingResponse",
+ "AsyncBackupsResourceWithStreamingResponse",
+ "ActionsResource",
+ "AsyncActionsResource",
+ "ActionsResourceWithRawResponse",
+ "AsyncActionsResourceWithRawResponse",
+ "ActionsResourceWithStreamingResponse",
+ "AsyncActionsResourceWithStreamingResponse",
+ "DestroyWithAssociatedResourcesResource",
+ "AsyncDestroyWithAssociatedResourcesResource",
+ "DestroyWithAssociatedResourcesResourceWithRawResponse",
+ "AsyncDestroyWithAssociatedResourcesResourceWithRawResponse",
+ "DestroyWithAssociatedResourcesResourceWithStreamingResponse",
+ "AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse",
+ "AutoscaleResource",
+ "AsyncAutoscaleResource",
+ "AutoscaleResourceWithRawResponse",
+ "AsyncAutoscaleResourceWithRawResponse",
+ "AutoscaleResourceWithStreamingResponse",
+ "AsyncAutoscaleResourceWithStreamingResponse",
+ "FirewallsResource",
+ "AsyncFirewallsResource",
+ "FirewallsResourceWithRawResponse",
+ "AsyncFirewallsResourceWithRawResponse",
+ "FirewallsResourceWithStreamingResponse",
+ "AsyncFirewallsResourceWithStreamingResponse",
+ "FloatingIPsResource",
+ "AsyncFloatingIPsResource",
+ "FloatingIPsResourceWithRawResponse",
+ "AsyncFloatingIPsResourceWithRawResponse",
+ "FloatingIPsResourceWithStreamingResponse",
+ "AsyncFloatingIPsResourceWithStreamingResponse",
+ "ImagesResource",
+ "AsyncImagesResource",
+ "ImagesResourceWithRawResponse",
+ "AsyncImagesResourceWithRawResponse",
+ "ImagesResourceWithStreamingResponse",
+ "AsyncImagesResourceWithStreamingResponse",
+ "LoadBalancersResource",
+ "AsyncLoadBalancersResource",
+ "LoadBalancersResourceWithRawResponse",
+ "AsyncLoadBalancersResourceWithRawResponse",
+ "LoadBalancersResourceWithStreamingResponse",
+ "AsyncLoadBalancersResourceWithStreamingResponse",
+ "SizesResource",
+ "AsyncSizesResource",
+ "SizesResourceWithRawResponse",
+ "AsyncSizesResourceWithRawResponse",
+ "SizesResourceWithStreamingResponse",
+ "AsyncSizesResourceWithStreamingResponse",
+ "SnapshotsResource",
+ "AsyncSnapshotsResource",
+ "SnapshotsResourceWithRawResponse",
+ "AsyncSnapshotsResourceWithRawResponse",
+ "SnapshotsResourceWithStreamingResponse",
+ "AsyncSnapshotsResourceWithStreamingResponse",
+ "VolumesResource",
+ "AsyncVolumesResource",
+ "VolumesResourceWithRawResponse",
+ "AsyncVolumesResourceWithRawResponse",
+ "VolumesResourceWithStreamingResponse",
+ "AsyncVolumesResourceWithStreamingResponse",
+ "AccountResource",
+ "AsyncAccountResource",
+ "AccountResourceWithRawResponse",
+ "AsyncAccountResourceWithRawResponse",
+ "AccountResourceWithStreamingResponse",
+ "AsyncAccountResourceWithStreamingResponse",
+ "GPUDropletsResource",
+ "AsyncGPUDropletsResource",
+ "GPUDropletsResourceWithRawResponse",
+ "AsyncGPUDropletsResourceWithRawResponse",
+ "GPUDropletsResourceWithStreamingResponse",
+ "AsyncGPUDropletsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/gpu_droplets/account/__init__.py b/src/gradient/resources/gpu_droplets/account/__init__.py
new file mode 100644
index 00000000..33286c3f
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/account/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from .account import (
+ AccountResource,
+ AsyncAccountResource,
+ AccountResourceWithRawResponse,
+ AsyncAccountResourceWithRawResponse,
+ AccountResourceWithStreamingResponse,
+ AsyncAccountResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "KeysResource",
+ "AsyncKeysResource",
+ "KeysResourceWithRawResponse",
+ "AsyncKeysResourceWithRawResponse",
+ "KeysResourceWithStreamingResponse",
+ "AsyncKeysResourceWithStreamingResponse",
+ "AccountResource",
+ "AsyncAccountResource",
+ "AccountResourceWithRawResponse",
+ "AsyncAccountResourceWithRawResponse",
+ "AccountResourceWithStreamingResponse",
+ "AsyncAccountResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/gpu_droplets/account/account.py b/src/gradient/resources/gpu_droplets/account/account.py
new file mode 100644
index 00000000..05f71ea4
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/account/account.py
@@ -0,0 +1,108 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["AccountResource", "AsyncAccountResource"]
+
+
+class AccountResource(SyncAPIResource):
+    """Synchronous account resource; exposes the ``keys`` sub-resource."""
+
+    @cached_property
+    def keys(self) -> KeysResource:
+        """Manage SSH keys available on your account."""
+        return KeysResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AccountResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AccountResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AccountResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AccountResourceWithStreamingResponse(self)
+
+
+class AsyncAccountResource(AsyncAPIResource):
+    """Asynchronous account resource; exposes the ``keys`` sub-resource."""
+
+    @cached_property
+    def keys(self) -> AsyncKeysResource:
+        """Manage SSH keys available on your account."""
+        return AsyncKeysResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncAccountResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncAccountResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncAccountResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncAccountResourceWithStreamingResponse(self)
+
+
+class AccountResourceWithRawResponse:
+    """Raw-response view of :class:`AccountResource`; sub-resources return raw HTTP responses."""
+
+    def __init__(self, account: AccountResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._account = account
+
+    @cached_property
+    def keys(self) -> KeysResourceWithRawResponse:
+        """Manage SSH keys available on your account."""
+        return KeysResourceWithRawResponse(self._account.keys)
+
+
+class AsyncAccountResourceWithRawResponse:
+    """Raw-response view of :class:`AsyncAccountResource`; sub-resources return raw HTTP responses."""
+
+    def __init__(self, account: AsyncAccountResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._account = account
+
+    @cached_property
+    def keys(self) -> AsyncKeysResourceWithRawResponse:
+        """Manage SSH keys available on your account."""
+        return AsyncKeysResourceWithRawResponse(self._account.keys)
+
+
+class AccountResourceWithStreamingResponse:
+    """Streaming-response view of :class:`AccountResource`; sub-resources don't eagerly read bodies."""
+
+    def __init__(self, account: AccountResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._account = account
+
+    @cached_property
+    def keys(self) -> KeysResourceWithStreamingResponse:
+        """Manage SSH keys available on your account."""
+        return KeysResourceWithStreamingResponse(self._account.keys)
+
+
+class AsyncAccountResourceWithStreamingResponse:
+    """Streaming-response view of :class:`AsyncAccountResource`; sub-resources don't eagerly read bodies."""
+
+    def __init__(self, account: AsyncAccountResource) -> None:
+        # Keep a handle on the wrapped resource (internal use only).
+        self._account = account
+
+    @cached_property
+    def keys(self) -> AsyncKeysResourceWithStreamingResponse:
+        """Manage SSH keys available on your account."""
+        return AsyncKeysResourceWithStreamingResponse(self._account.keys)
diff --git a/src/gradient/resources/gpu_droplets/account/keys.py b/src/gradient/resources/gpu_droplets/account/keys.py
new file mode 100644
index 00000000..22aa6f0d
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/account/keys.py
@@ -0,0 +1,592 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.account import key_list_params, key_create_params, key_update_params
+from ....types.gpu_droplets.account.key_list_response import KeyListResponse
+from ....types.gpu_droplets.account.key_create_response import KeyCreateResponse
+from ....types.gpu_droplets.account.key_update_response import KeyUpdateResponse
+from ....types.gpu_droplets.account.key_retrieve_response import KeyRetrieveResponse
+
+__all__ = ["KeysResource", "AsyncKeysResource"]
+
+
+class KeysResource(SyncAPIResource):
+ """Manage SSH keys available on your account."""
+
+ @cached_property
+ def with_raw_response(self) -> KeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return KeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return KeysResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ public_key: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyCreateResponse:
+ """
+ To add a new SSH public key to your DigitalOcean account, send a POST request to
+ `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the
+ `public_key` attribute to the full public key you are adding.
+
+ Args:
+ name: A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+
+ public_key: The entire public key string that was uploaded. Embedded into the root user's
+ `authorized_keys` file if you include this key during Droplet creation.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys",
+ body=maybe_transform(
+ {
+ "name": name,
+ "public_key": public_key,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ ssh_key_identifier: Union[int, str],
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyRetrieveResponse:
+ """
+ To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID`
+ or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with
+ the key `ssh_key` and value an ssh_key object which contains the standard
+ ssh_key attributes.
+
+ Args:
+ ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH
+ key into a Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/account/keys/{ssh_key_identifier}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ def update(
+ self,
+ ssh_key_identifier: Union[int, str],
+ *,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyUpdateResponse:
+ """
+ To update the name of an SSH key, send a PUT request to either
+ `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set
+ the `name` attribute to the new name you want to use.
+
+ Args:
+ ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH
+ key into a Droplet.
+
+ name: A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._put(
+ f"/v2/account/keys/{ssh_key_identifier}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}",
+ body=maybe_transform({"name": name}, key_update_params.KeyUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyListResponse:
+ """
+ To list all of the keys in your account, send a GET request to
+ `/v2/account/keys`. The response will be a JSON object with a key set to
+ `ssh_keys`. The value of this will be an array of ssh_key objects, each of which
+ contains the standard ssh_key attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ def delete(
+ self,
+ ssh_key_identifier: Union[int, str],
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To destroy a public SSH key that you have in your account, send a DELETE request
+ to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204
+ status will be returned, indicating that the action was successful and that the
+ response body is empty.
+
+ Args:
+ ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH
+ key into a Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/account/keys/{ssh_key_identifier}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncKeysResource(AsyncAPIResource):
+ """Manage SSH keys available on your account."""
+
+ @cached_property
+ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncKeysResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ public_key: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyCreateResponse:
+ """
+ To add a new SSH public key to your DigitalOcean account, send a POST request to
+ `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the
+ `public_key` attribute to the full public key you are adding.
+
+ Args:
+ name: A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+
+ public_key: The entire public key string that was uploaded. Embedded into the root user's
+ `authorized_keys` file if you include this key during Droplet creation.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "public_key": public_key,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ ssh_key_identifier: Union[int, str],
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyRetrieveResponse:
+ """
+ To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID`
+ or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with
+ the key `ssh_key` and value an ssh_key object which contains the standard
+ ssh_key attributes.
+
+ Args:
+ ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH
+ key into a Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/v2/account/keys/{ssh_key_identifier}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ ssh_key_identifier: Union[int, str],
+ *,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyUpdateResponse:
+ """
+ To update the name of an SSH key, send a PUT request to either
+ `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set
+ the `name` attribute to the new name you want to use.
+
+ Args:
+ ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH
+ key into a Droplet.
+
+ name: A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._put(
+ f"/v2/account/keys/{ssh_key_identifier}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}",
+ body=await async_maybe_transform({"name": name}, key_update_params.KeyUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KeyListResponse:
+ """
+ To list all of the keys in your account, send a GET request to
+ `/v2/account/keys`. The response will be a JSON object with a key set to
+ `ssh_keys`. The value of this will be an array of ssh_key objects, each of which
+ contains the standard ssh_key attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ async def delete(
+ self,
+ ssh_key_identifier: Union[int, str],
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To destroy a public SSH key that you have in your account, send a DELETE request
+ to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204
+ status will be returned, indicating that the action was successful and that the
+ response body is empty.
+
+ Args:
+ ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH
+ key into a Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/account/keys/{ssh_key_identifier}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class KeysResourceWithRawResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ keys.delete,
+ )
+
+
+class AsyncKeysResourceWithRawResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ keys.delete,
+ )
+
+
+class KeysResourceWithStreamingResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ keys.delete,
+ )
+
+
+class AsyncKeysResourceWithStreamingResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ keys.delete,
+ )
diff --git a/src/gradient/resources/gpu_droplets/actions.py b/src/gradient/resources/gpu_droplets/actions.py
new file mode 100644
index 00000000..dd8e55dc
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/actions.py
@@ -0,0 +1,2090 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.gpu_droplets import action_list_params, action_initiate_params, action_bulk_initiate_params
+from ...types.droplet_backup_policy_param import DropletBackupPolicyParam
+from ...types.gpu_droplets.action_list_response import ActionListResponse
+from ...types.gpu_droplets.action_initiate_response import ActionInitiateResponse
+from ...types.gpu_droplets.action_retrieve_response import ActionRetrieveResponse
+from ...types.gpu_droplets.action_bulk_initiate_response import ActionBulkInitiateResponse
+
+__all__ = ["ActionsResource", "AsyncActionsResource"]
+
+
+class ActionsResource(SyncAPIResource):
+ """Droplet actions are tasks that can be executed on a Droplet.
+
+ These can be
+ things like rebooting, resizing, snapshotting, etc.
+
+ Droplet action requests are generally targeted at one of the "actions"
+ endpoints for a specific Droplet. The specific actions are usually
+ initiated by sending a POST request with the action and arguments as
+ parameters.
+
+ Droplet action requests create a Droplet actions object, which can be used
+ to get information about the status of an action. Creating a Droplet
+ action is asynchronous: the HTTP call will return the action object before
+ the action has finished processing on the Droplet. The current status of
+ an action can be retrieved from either the Droplet actions endpoint or the
+ global actions endpoint. If a Droplet action is uncompleted it may block
+ the creation of a subsequent action for that Droplet, the locked attribute
+ of the Droplet will be true and attempts to create a Droplet action will
+ fail with a status of 422.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> ActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ActionsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ action_id: int,
+ *,
+ droplet_id: int,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionRetrieveResponse:
+ """
+ To retrieve a Droplet action, send a GET request to
+ `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`.
+
+ The response will be a JSON object with a key called `action`. The value will be
+ a Droplet action object.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/droplets/{droplet_id}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionRetrieveResponse,
+ )
+
+ def list(
+ self,
+ droplet_id: int,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve a list of all actions that have been executed for a Droplet, send a
+ GET request to `/v2/droplets/$DROPLET_ID/actions`.
+
+ The results will be returned as a JSON object with an `actions` key. This will
+ be set to an array filled with `action` objects containing the standard `action`
+ attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/droplets/{droplet_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_list_params.ActionListParams,
+ ),
+ ),
+ cast_to=ActionListResponse,
+ )
+
+ @overload
+ def bulk_initiate(
+ self,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ tag_name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionBulkInitiateResponse:
+ """Some actions can be performed in bulk on tagged Droplets.
+
+ The actions can be
+ initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with
+ the action arguments.
+
+ Only a sub-set of action types are supported:
+
+ - `power_cycle`
+ - `power_on`
+ - `power_off`
+ - `shutdown`
+ - `enable_ipv6`
+ - `enable_backups`
+ - `disable_backups`
+ - `snapshot` (also requires `image:create` permission)
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or
+ `type`. Requires `tag:read` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def bulk_initiate(
+ self,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ tag_name: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionBulkInitiateResponse:
+ """Some actions can be performed in bulk on tagged Droplets.
+
+ The actions can be
+ initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with
+ the action arguments.
+
+ Only a sub-set of action types are supported:
+
+ - `power_cycle`
+ - `power_on`
+ - `power_off`
+ - `shutdown`
+ - `enable_ipv6`
+ - `enable_backups`
+ - `disable_backups`
+ - `snapshot` (also requires `image:create` permission)
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or
+ `type`. Requires `tag:read` scope.
+
+ name: The name to give the new snapshot of the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"])
+ def bulk_initiate(
+ self,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ tag_name: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionBulkInitiateResponse:
+ return self._post(
+ "/v2/droplets/actions"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/droplets/actions",
+ body=maybe_transform(
+ {
+ "type": type,
+ "name": name,
+ },
+ action_bulk_initiate_params.ActionBulkInitiateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams),
+ ),
+ cast_to=ActionBulkInitiateResponse,
+ )
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup
+ plan will default to daily.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ backup_policy: An object specifying the backup policy for the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ image: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ image: The ID of a backup of the current Droplet instance to restore from.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ disk: bool | Omit = omit,
+ size: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU.
+ This is a permanent change and cannot be reversed as a Droplet's disk size
+ cannot be decreased.
+
+ size: The slug identifier for the size to which you wish to resize the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ image: Union[str, int] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ image: The image ID of a public or private image or the slug identifier for a public
+ image. The Droplet will be rebuilt using this image as its base.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ name: The new name for the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ kernel: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ kernel: A unique number used to identify and reference a specific kernel.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to on of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ name: The name to give the new snapshot of the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"])
+ def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ image: int | Union[str, int] | Omit = omit,
+ disk: bool | Omit = omit,
+ size: str | Omit = omit,
+ name: str | Omit = omit,
+ kernel: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ return self._post(
+ f"/v2/droplets/{droplet_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions",
+ body=maybe_transform(
+ {
+ "type": type,
+ "backup_policy": backup_policy,
+ "image": image,
+ "disk": disk,
+ "size": size,
+ "name": name,
+ "kernel": kernel,
+ },
+ action_initiate_params.ActionInitiateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionInitiateResponse,
+ )
+
+
+class AsyncActionsResource(AsyncAPIResource):
+ """Droplet actions are tasks that can be executed on a Droplet.
+
+ These can be
+ things like rebooting, resizing, snapshotting, etc.
+
+ Droplet action requests are generally targeted at one of the "actions"
+ endpoints for a specific Droplet. The specific actions are usually
+ initiated by sending a POST request with the action and arguments as
+ parameters.
+
+ Droplet action requests create a Droplet actions object, which can be used
+ to get information about the status of an action. Creating a Droplet
+ action is asynchronous: the HTTP call will return the action object before
+ the action has finished processing on the Droplet. The current status of
+ an action can be retrieved from either the Droplet actions endpoint or the
+ global actions endpoint. If a Droplet action is uncompleted it may block
+ the creation of a subsequent action for that Droplet, the locked attribute
+ of the Droplet will be true and attempts to create a Droplet action will
+ fail with a status of 422.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncActionsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ action_id: int,
+ *,
+ droplet_id: int,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionRetrieveResponse:
+ """
+ To retrieve a Droplet action, send a GET request to
+ `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`.
+
+ The response will be a JSON object with a key called `action`. The value will be
+ a Droplet action object.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/v2/droplets/{droplet_id}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ droplet_id: int,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve a list of all actions that have been executed for a Droplet, send a
+ GET request to `/v2/droplets/$DROPLET_ID/actions`.
+
+ The results will be returned as a JSON object with an `actions` key. This will
+ be set to an array filled with `action` objects containing the standard `action`
+ attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/v2/droplets/{droplet_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_list_params.ActionListParams,
+ ),
+ ),
+ cast_to=ActionListResponse,
+ )
+
+ @overload
+ async def bulk_initiate(
+ self,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ tag_name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionBulkInitiateResponse:
+ """Some actions can be performed in bulk on tagged Droplets.
+
+ The actions can be
+ initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with
+ the action arguments.
+
+ Only a sub-set of action types are supported:
+
+ - `power_cycle`
+ - `power_on`
+ - `power_off`
+ - `shutdown`
+ - `enable_ipv6`
+ - `enable_backups`
+ - `disable_backups`
+ - `snapshot` (also requires `image:create` permission)
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or
+ `type`. Requires `tag:read` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def bulk_initiate(
+ self,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ tag_name: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionBulkInitiateResponse:
+ """Some actions can be performed in bulk on tagged Droplets.
+
+ The actions can be
+ initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with
+ the action arguments.
+
+ Only a sub-set of action types are supported:
+
+ - `power_cycle`
+ - `power_on`
+ - `power_off`
+ - `shutdown`
+ - `enable_ipv6`
+ - `enable_backups`
+ - `disable_backups`
+ - `snapshot` (also requires `image:create` permission)
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or
+ `type`. Requires `tag:read` scope.
+
+ name: The name to give the new snapshot of the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"])
+ async def bulk_initiate(
+ self,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ tag_name: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionBulkInitiateResponse:
+ return await self._post(
+ "/v2/droplets/actions"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/droplets/actions",
+ body=await async_maybe_transform(
+ {
+ "type": type,
+ "name": name,
+ },
+ action_bulk_initiate_params.ActionBulkInitiateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams
+ ),
+ ),
+ cast_to=ActionBulkInitiateResponse,
+ )
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to on of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to on of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup
+ plan will default to daily.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+ `type` attribute to on of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ backup_policy: An object specifying the backup policy for the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ image: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ image: The ID of a backup of the current Droplet instance to restore from.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ disk: bool | Omit = omit,
+ size: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU.
+ This is a permanent change and cannot be reversed as a Droplet's disk size
+ cannot be decreased.
+
+ size: The slug identifier for the size to which you wish to resize the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ image: Union[str, int] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ image: The image ID of a public or private image or the slug identifier for a public
+ image. The Droplet will be rebuilt using this image as its base.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ name: The new name for the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ kernel: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ kernel: A unique number used to identify and reference a specific kernel.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ """
+ To initiate an action on a Droplet send a POST request to
+ `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details | Additionally Required Permission |
+ | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
+ | `enable_backups` | Enables backups for a Droplet | |
+ | `disable_backups` | Disables backups for a Droplet | |
+ | `change_backup_policy` | Update the backup policy for a Droplet | |
+ | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | |
+ | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | |
+ | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | |
+ | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | |
+ | `power_on` | Powers on a Droplet. | |
+ | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin |
+ | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin |
+ | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create |
+ | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin |
+ | `rename` | Renames a Droplet. | |
+ | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | |
+ | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | |
+ | `snapshot` | Takes a snapshot of a Droplet. | image:create |
+
+ Args:
+ type: The type of action to initiate for the Droplet.
+
+ name: The name to give the new snapshot of the Droplet.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"])
+ async def initiate(
+ self,
+ droplet_id: int,
+ *,
+ type: Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ],
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ image: int | Union[str, int] | Omit = omit,
+ disk: bool | Omit = omit,
+ size: str | Omit = omit,
+ name: str | Omit = omit,
+ kernel: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateResponse:
+ return await self._post(
+ f"/v2/droplets/{droplet_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions",
+ body=await async_maybe_transform(
+ {
+ "type": type,
+ "backup_policy": backup_policy,
+ "image": image,
+ "disk": disk,
+ "size": size,
+ "name": name,
+ "kernel": kernel,
+ },
+ action_initiate_params.ActionInitiateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionInitiateResponse,
+ )
+
+
+class ActionsResourceWithRawResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ actions.list,
+ )
+ self.bulk_initiate = to_raw_response_wrapper(
+ actions.bulk_initiate,
+ )
+ self.initiate = to_raw_response_wrapper(
+ actions.initiate,
+ )
+
+
+class AsyncActionsResourceWithRawResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = async_to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ actions.list,
+ )
+ self.bulk_initiate = async_to_raw_response_wrapper(
+ actions.bulk_initiate,
+ )
+ self.initiate = async_to_raw_response_wrapper(
+ actions.initiate,
+ )
+
+
+class ActionsResourceWithStreamingResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ actions.list,
+ )
+ self.bulk_initiate = to_streamed_response_wrapper(
+ actions.bulk_initiate,
+ )
+ self.initiate = to_streamed_response_wrapper(
+ actions.initiate,
+ )
+
+
+class AsyncActionsResourceWithStreamingResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ actions.list,
+ )
+ self.bulk_initiate = async_to_streamed_response_wrapper(
+ actions.bulk_initiate,
+ )
+ self.initiate = async_to_streamed_response_wrapper(
+ actions.initiate,
+ )
diff --git a/src/gradient/resources/gpu_droplets/autoscale.py b/src/gradient/resources/gpu_droplets/autoscale.py
new file mode 100644
index 00000000..b529d9da
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/autoscale.py
@@ -0,0 +1,975 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.gpu_droplets import (
+ autoscale_list_params,
+ autoscale_create_params,
+ autoscale_update_params,
+ autoscale_list_history_params,
+ autoscale_list_members_params,
+)
+from ...types.gpu_droplets.autoscale_list_response import AutoscaleListResponse
+from ...types.gpu_droplets.autoscale_create_response import AutoscaleCreateResponse
+from ...types.gpu_droplets.autoscale_update_response import AutoscaleUpdateResponse
+from ...types.gpu_droplets.autoscale_retrieve_response import AutoscaleRetrieveResponse
+from ...types.gpu_droplets.autoscale_list_history_response import AutoscaleListHistoryResponse
+from ...types.gpu_droplets.autoscale_list_members_response import AutoscaleListMembersResponse
+from ...types.gpu_droplets.autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam
+
+__all__ = ["AutoscaleResource", "AsyncAutoscaleResource"]
+
+
+class AutoscaleResource(SyncAPIResource):
+ """
+ Droplet autoscale pools manage automatic horizontal scaling for your applications based on resource usage (CPU, memory, or both) or a static configuration.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AutoscaleResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AutoscaleResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AutoscaleResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AutoscaleResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ config: autoscale_create_params.Config,
+ droplet_template: AutoscalePoolDropletTemplateParam,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleCreateResponse:
+ """
+ To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale`
+ setting the required attributes.
+
+ The response body will contain a JSON object with a key called `autoscale_pool`
+ containing the standard attributes for the new autoscale pool.
+
+ Args:
+ config: The scaling configuration for an autoscale pool, which is how the pool scales up
+ and down (either by resource utilization or static configuration).
+
+ name: The human-readable name of the autoscale pool. This field cannot be updated.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/droplets/autoscale"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/droplets/autoscale",
+ body=maybe_transform(
+ {
+ "config": config,
+ "droplet_template": droplet_template,
+ "name": name,
+ },
+ autoscale_create_params.AutoscaleCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutoscaleCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ autoscale_pool_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleRetrieveResponse:
+ """
+ To show information about an individual autoscale pool, send a GET request to
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return self._get(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutoscaleRetrieveResponse,
+ )
+
+ def update(
+ self,
+ autoscale_pool_id: str,
+ *,
+ config: autoscale_update_params.Config,
+ droplet_template: AutoscalePoolDropletTemplateParam,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleUpdateResponse:
+ """
+ To update the configuration of an existing autoscale pool, send a PUT request to
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full
+ representation of the autoscale pool including existing attributes.
+
+ Args:
+ config: The scaling configuration for an autoscale pool, which is how the pool scales up
+ and down (either by resource utilization or static configuration).
+
+ name: The human-readable name of the autoscale pool. This field cannot be updated.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return self._put(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}",
+ body=maybe_transform(
+ {
+ "config": config,
+ "droplet_template": droplet_template,
+ "name": name,
+ },
+ autoscale_update_params.AutoscaleUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutoscaleUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ name: str | Omit = omit,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleListResponse:
+ """
+ To list all autoscale pools in your team, send a GET request to
+ `/v2/droplets/autoscale`. The response body will be a JSON object with a key of
+ `autoscale_pools` containing an array of autoscale pool objects. These each
+ contain the standard autoscale pool attributes.
+
+ Args:
+ name: The name of the autoscale pool
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/droplets/autoscale"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/droplets/autoscale",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "name": name,
+ "page": page,
+ "per_page": per_page,
+ },
+ autoscale_list_params.AutoscaleListParams,
+ ),
+ ),
+ cast_to=AutoscaleListResponse,
+ )
+
+ def delete(
+ self,
+ autoscale_pool_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To destroy an autoscale pool, send a DELETE request to the
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint.
+
+ A successful response will include a 202 response code and no content.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def delete_dangerous(
+ self,
+ autoscale_pool_id: str,
+ *,
+ x_dangerous: bool,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To destroy an autoscale pool and its associated resources (Droplets), send a
+ DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous`
+ endpoint.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")})
+ return self._delete(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def list_history(
+ self,
+ autoscale_pool_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleListHistoryResponse:
+ """
+ To list all of the scaling history events of an autoscale pool, send a GET
+ request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`.
+
+ The response body will be a JSON object with a key of `history`. This will be
+ set to an array containing objects each representing a history event.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return self._get(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}/history"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ autoscale_list_history_params.AutoscaleListHistoryParams,
+ ),
+ ),
+ cast_to=AutoscaleListHistoryResponse,
+ )
+
+ def list_members(
+ self,
+ autoscale_pool_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleListMembersResponse:
+ """
+ To list the Droplets in an autoscale pool, send a GET request to
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`.
+
+ The response body will be a JSON object with a key of `droplets`. This will be
+ set to an array containing information about each of the Droplets in the
+ autoscale pool.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return self._get(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}/members"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ autoscale_list_members_params.AutoscaleListMembersParams,
+ ),
+ ),
+ cast_to=AutoscaleListMembersResponse,
+ )
+
+
+class AsyncAutoscaleResource(AsyncAPIResource):
+ """
+ Droplet autoscale pools manage automatic horizontal scaling for your applications based on resource usage (CPU, memory, or both) or a static configuration.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAutoscaleResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAutoscaleResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAutoscaleResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncAutoscaleResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ config: autoscale_create_params.Config,
+ droplet_template: AutoscalePoolDropletTemplateParam,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleCreateResponse:
+ """
+ To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale`
+ setting the required attributes.
+
+ The response body will contain a JSON object with a key called `autoscale_pool`
+ containing the standard attributes for the new autoscale pool.
+
+ Args:
+ config: The scaling configuration for an autoscale pool, which is how the pool scales up
+ and down (either by resource utilization or static configuration).
+
+ name: The human-readable name of the autoscale pool. This field cannot be updated.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/droplets/autoscale"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/droplets/autoscale",
+ body=await async_maybe_transform(
+ {
+ "config": config,
+ "droplet_template": droplet_template,
+ "name": name,
+ },
+ autoscale_create_params.AutoscaleCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutoscaleCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ autoscale_pool_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleRetrieveResponse:
+ """
+ To show information about an individual autoscale pool, send a GET request to
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return await self._get(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutoscaleRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ autoscale_pool_id: str,
+ *,
+ config: autoscale_update_params.Config,
+ droplet_template: AutoscalePoolDropletTemplateParam,
+ name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleUpdateResponse:
+ """
+ To update the configuration of an existing autoscale pool, send a PUT request to
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full
+ representation of the autoscale pool including existing attributes.
+
+ Args:
+ config: The scaling configuration for an autoscale pool, which is how the pool scales up
+ and down (either by resource utilization or static configuration).
+
+ name: The human-readable name of the autoscale pool. This field cannot be updated.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return await self._put(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}",
+ body=await async_maybe_transform(
+ {
+ "config": config,
+ "droplet_template": droplet_template,
+ "name": name,
+ },
+ autoscale_update_params.AutoscaleUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AutoscaleUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ name: str | Omit = omit,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleListResponse:
+ """
+ To list all autoscale pools in your team, send a GET request to
+ `/v2/droplets/autoscale`. The response body will be a JSON object with a key of
+ `autoscale_pools` containing an array of autoscale pool objects. These each
+ contain the standard autoscale pool attributes.
+
+ Args:
+ name: The name of the autoscale pool
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/droplets/autoscale"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/droplets/autoscale",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "name": name,
+ "page": page,
+ "per_page": per_page,
+ },
+ autoscale_list_params.AutoscaleListParams,
+ ),
+ ),
+ cast_to=AutoscaleListResponse,
+ )
+
+ async def delete(
+ self,
+ autoscale_pool_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To destroy an autoscale pool, send a DELETE request to the
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint.
+
+ A successful response will include a 202 response code and no content.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ async def delete_dangerous(
+ self,
+ autoscale_pool_id: str,
+ *,
+ x_dangerous: bool,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To destroy an autoscale pool and its associated resources (Droplets), send a
+ DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous`
+ endpoint.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")})
+ return await self._delete(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ async def list_history(
+ self,
+ autoscale_pool_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleListHistoryResponse:
+ """
+ To list all of the scaling history events of an autoscale pool, send a GET
+ request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`.
+
+ The response body will be a JSON object with a key of `history`. This will be
+ set to an array containing objects each representing a history event.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return await self._get(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}/history"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ autoscale_list_history_params.AutoscaleListHistoryParams,
+ ),
+ ),
+ cast_to=AutoscaleListHistoryResponse,
+ )
+
+ async def list_members(
+ self,
+ autoscale_pool_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AutoscaleListMembersResponse:
+ """
+ To list the Droplets in an autoscale pool, send a GET request to
+ `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`.
+
+ The response body will be a JSON object with a key of `droplets`. This will be
+ set to an array containing information about each of the Droplets in the
+ autoscale pool.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not autoscale_pool_id:
+ raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}")
+ return await self._get(
+ f"/v2/droplets/autoscale/{autoscale_pool_id}/members"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ autoscale_list_members_params.AutoscaleListMembersParams,
+ ),
+ ),
+ cast_to=AutoscaleListMembersResponse,
+ )
+
+
+class AutoscaleResourceWithRawResponse:
+ def __init__(self, autoscale: AutoscaleResource) -> None:
+ self._autoscale = autoscale
+
+ self.create = to_raw_response_wrapper(
+ autoscale.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ autoscale.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ autoscale.update,
+ )
+ self.list = to_raw_response_wrapper(
+ autoscale.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ autoscale.delete,
+ )
+ self.delete_dangerous = to_raw_response_wrapper(
+ autoscale.delete_dangerous,
+ )
+ self.list_history = to_raw_response_wrapper(
+ autoscale.list_history,
+ )
+ self.list_members = to_raw_response_wrapper(
+ autoscale.list_members,
+ )
+
+
+class AsyncAutoscaleResourceWithRawResponse:
+ def __init__(self, autoscale: AsyncAutoscaleResource) -> None:
+ self._autoscale = autoscale
+
+ self.create = async_to_raw_response_wrapper(
+ autoscale.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ autoscale.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ autoscale.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ autoscale.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ autoscale.delete,
+ )
+ self.delete_dangerous = async_to_raw_response_wrapper(
+ autoscale.delete_dangerous,
+ )
+ self.list_history = async_to_raw_response_wrapper(
+ autoscale.list_history,
+ )
+ self.list_members = async_to_raw_response_wrapper(
+ autoscale.list_members,
+ )
+
+
+class AutoscaleResourceWithStreamingResponse:
+ def __init__(self, autoscale: AutoscaleResource) -> None:
+ self._autoscale = autoscale
+
+ self.create = to_streamed_response_wrapper(
+ autoscale.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ autoscale.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ autoscale.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ autoscale.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ autoscale.delete,
+ )
+ self.delete_dangerous = to_streamed_response_wrapper(
+ autoscale.delete_dangerous,
+ )
+ self.list_history = to_streamed_response_wrapper(
+ autoscale.list_history,
+ )
+ self.list_members = to_streamed_response_wrapper(
+ autoscale.list_members,
+ )
+
+
+class AsyncAutoscaleResourceWithStreamingResponse:
+ def __init__(self, autoscale: AsyncAutoscaleResource) -> None:
+ self._autoscale = autoscale
+
+ self.create = async_to_streamed_response_wrapper(
+ autoscale.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ autoscale.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ autoscale.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ autoscale.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ autoscale.delete,
+ )
+ self.delete_dangerous = async_to_streamed_response_wrapper(
+ autoscale.delete_dangerous,
+ )
+ self.list_history = async_to_streamed_response_wrapper(
+ autoscale.list_history,
+ )
+ self.list_members = async_to_streamed_response_wrapper(
+ autoscale.list_members,
+ )
diff --git a/src/gradient/resources/gpu_droplets/backups.py b/src/gradient/resources/gpu_droplets/backups.py
new file mode 100644
index 00000000..a924c93b
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/backups.py
@@ -0,0 +1,482 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.gpu_droplets import backup_list_params, backup_list_policies_params
+from ...types.gpu_droplets.backup_list_response import BackupListResponse
+from ...types.gpu_droplets.backup_list_policies_response import BackupListPoliciesResponse
+from ...types.gpu_droplets.backup_retrieve_policy_response import BackupRetrievePolicyResponse
+from ...types.gpu_droplets.backup_list_supported_policies_response import BackupListSupportedPoliciesResponse
+
+__all__ = ["BackupsResource", "AsyncBackupsResource"]
+
+
class BackupsResource(SyncAPIResource):
    """
    A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
    virtual machine. By sending requests to the Droplet endpoint, you can
    list, create, or delete Droplets.

    Some of the attributes will have an object value. The `region` and `image`
    objects will all contain the standard attributes of their associated
    types. Find more information about each of these objects in their
    respective sections.
    """

    @cached_property
    def with_raw_response(self) -> BackupsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
        """
        return BackupsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BackupsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
        """
        return BackupsResourceWithStreamingResponse(self)

    def list(
        self,
        droplet_id: int,
        *,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupListResponse:
        """
        To retrieve any backups associated with a Droplet, send a GET request to
        `/v2/droplets/$DROPLET_ID/backups`.

        You will get back a JSON object that has a `backups` key. This will be set to an
        array of backup objects, each of which contains the standard Droplet backup
        attributes.

        Args:
          page: Which 'page' of paginated results to return.

          per_page: Number of items returned per page

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            # Relative path when the client's base URL was overridden;
            # otherwise target the public DigitalOcean API host explicitly.
            f"/v2/droplets/{droplet_id}/backups"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Omitted pagination params are stripped by maybe_transform.
                query=maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    backup_list_params.BackupListParams,
                ),
            ),
            cast_to=BackupListResponse,
        )

    def list_policies(
        self,
        *,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupListPoliciesResponse:
        """
        To list information about the backup policies for all Droplets in the account,
        send a GET request to `/v2/droplets/backups/policies`.

        Args:
          page: Which 'page' of paginated results to return.

          per_page: Number of items returned per page

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            # Relative path when the client's base URL was overridden;
            # otherwise target the public DigitalOcean API host explicitly.
            "/v2/droplets/backups/policies"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/droplets/backups/policies",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    backup_list_policies_params.BackupListPoliciesParams,
                ),
            ),
            cast_to=BackupListPoliciesResponse,
        )

    def list_supported_policies(
        self,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupListSupportedPoliciesResponse:
        """
        To retrieve a list of all supported Droplet backup policies, send a GET request
        to `/v2/droplets/backups/supported_policies`.
        """
        return self._get(
            "/v2/droplets/backups/supported_policies"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/droplets/backups/supported_policies",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=BackupListSupportedPoliciesResponse,
        )

    def retrieve_policy(
        self,
        droplet_id: int,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupRetrievePolicyResponse:
        """
        To show information about an individual Droplet's backup policy, send a GET
        request to `/v2/droplets/$DROPLET_ID/backups/policy`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            f"/v2/droplets/{droplet_id}/backups/policy"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=BackupRetrievePolicyResponse,
        )
+
+
class AsyncBackupsResource(AsyncAPIResource):
    """
    A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
    virtual machine. By sending requests to the Droplet endpoint, you can
    list, create, or delete Droplets.

    Some of the attributes will have an object value. The `region` and `image`
    objects will all contain the standard attributes of their associated
    types. Find more information about each of these objects in their
    respective sections.
    """

    @cached_property
    def with_raw_response(self) -> AsyncBackupsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBackupsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBackupsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
        """
        return AsyncBackupsResourceWithStreamingResponse(self)

    async def list(
        self,
        droplet_id: int,
        *,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupListResponse:
        """
        To retrieve any backups associated with a Droplet, send a GET request to
        `/v2/droplets/$DROPLET_ID/backups`.

        You will get back a JSON object that has a `backups` key. This will be set to an
        array of backup objects, each of which contains the standard Droplet backup
        attributes.

        Args:
          page: Which 'page' of paginated results to return.

          per_page: Number of items returned per page

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._get(
            # Relative path when the client's base URL was overridden;
            # otherwise target the public DigitalOcean API host explicitly.
            f"/v2/droplets/{droplet_id}/backups"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Omitted pagination params are stripped by async_maybe_transform.
                query=await async_maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    backup_list_params.BackupListParams,
                ),
            ),
            cast_to=BackupListResponse,
        )

    async def list_policies(
        self,
        *,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupListPoliciesResponse:
        """
        To list information about the backup policies for all Droplets in the account,
        send a GET request to `/v2/droplets/backups/policies`.

        Args:
          page: Which 'page' of paginated results to return.

          per_page: Number of items returned per page

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._get(
            # Relative path when the client's base URL was overridden;
            # otherwise target the public DigitalOcean API host explicitly.
            "/v2/droplets/backups/policies"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/droplets/backups/policies",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    backup_list_policies_params.BackupListPoliciesParams,
                ),
            ),
            cast_to=BackupListPoliciesResponse,
        )

    async def list_supported_policies(
        self,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupListSupportedPoliciesResponse:
        """
        To retrieve a list of all supported Droplet backup policies, send a GET request
        to `/v2/droplets/backups/supported_policies`.
        """
        return await self._get(
            "/v2/droplets/backups/supported_policies"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/droplets/backups/supported_policies",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=BackupListSupportedPoliciesResponse,
        )

    async def retrieve_policy(
        self,
        droplet_id: int,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> BackupRetrievePolicyResponse:
        """
        To show information about an individual Droplet's backup policy, send a GET
        request to `/v2/droplets/$DROPLET_ID/backups/policy`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._get(
            f"/v2/droplets/{droplet_id}/backups/policy"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=BackupRetrievePolicyResponse,
        )
+
+
class BackupsResourceWithRawResponse:
    """View over :class:`BackupsResource` whose methods return raw response objects."""

    def __init__(self, backups: BackupsResource) -> None:
        self._backups = backups

        # Wrap every endpoint method of the underlying resource; attribute
        # assignment order matches the original explicit assignments.
        for _method in ("list", "list_policies", "list_supported_policies", "retrieve_policy"):
            setattr(self, _method, to_raw_response_wrapper(getattr(backups, _method)))
+
+
class AsyncBackupsResourceWithRawResponse:
    """View over :class:`AsyncBackupsResource` whose methods return raw response objects."""

    def __init__(self, backups: AsyncBackupsResource) -> None:
        self._backups = backups

        # Wrap every endpoint method of the underlying resource; attribute
        # assignment order matches the original explicit assignments.
        for _method in ("list", "list_policies", "list_supported_policies", "retrieve_policy"):
            setattr(self, _method, async_to_raw_response_wrapper(getattr(backups, _method)))
+
+
class BackupsResourceWithStreamingResponse:
    """View over :class:`BackupsResource` whose methods return streamed responses."""

    def __init__(self, backups: BackupsResource) -> None:
        self._backups = backups

        # Wrap every endpoint method of the underlying resource; attribute
        # assignment order matches the original explicit assignments.
        for _method in ("list", "list_policies", "list_supported_policies", "retrieve_policy"):
            setattr(self, _method, to_streamed_response_wrapper(getattr(backups, _method)))
+
+
class AsyncBackupsResourceWithStreamingResponse:
    """View over :class:`AsyncBackupsResource` whose methods return streamed responses."""

    def __init__(self, backups: AsyncBackupsResource) -> None:
        self._backups = backups

        # Wrap every endpoint method of the underlying resource; attribute
        # assignment order matches the original explicit assignments.
        for _method in ("list", "list_policies", "list_supported_policies", "retrieve_policy"):
            setattr(self, _method, async_to_streamed_response_wrapper(getattr(backups, _method)))
diff --git a/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py
new file mode 100644
index 00000000..04c8f5b3
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py
@@ -0,0 +1,644 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.gpu_droplets import destroy_with_associated_resource_delete_selective_params
+from ...types.gpu_droplets.destroy_with_associated_resource_list_response import (
+ DestroyWithAssociatedResourceListResponse,
+)
+from ...types.gpu_droplets.destroy_with_associated_resource_check_status_response import (
+ DestroyWithAssociatedResourceCheckStatusResponse,
+)
+
+__all__ = ["DestroyWithAssociatedResourcesResource", "AsyncDestroyWithAssociatedResourcesResource"]
+
+
class DestroyWithAssociatedResourcesResource(SyncAPIResource):
    """
    A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
    virtual machine. By sending requests to the Droplet endpoint, you can
    list, create, or delete Droplets.

    Some of the attributes will have an object value. The `region` and `image`
    objects will all contain the standard attributes of their associated
    types. Find more information about each of these objects in their
    respective sections.
    """

    @cached_property
    def with_raw_response(self) -> DestroyWithAssociatedResourcesResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
        """
        return DestroyWithAssociatedResourcesResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
        """
        return DestroyWithAssociatedResourcesResourceWithStreamingResponse(self)

    def list(
        self,
        droplet_id: int,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> DestroyWithAssociatedResourceListResponse:
        """
        To list the associated billable resources that can be destroyed along with a
        Droplet, send a GET request to the
        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint.

        This endpoint will only return resources that you are authorized to see. For
        example, to see associated Reserved IPs, include the `reserved_ip:read` scope.

        The response will be a JSON object containing `snapshots`, `volumes`, and
        `volume_snapshots` keys. Each will be set to an array of objects containing
        information about the associated resources.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            # Relative path when the client's base URL was overridden;
            # otherwise target the public DigitalOcean API host explicitly.
            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=DestroyWithAssociatedResourceListResponse,
        )

    def check_status(
        self,
        droplet_id: int,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> DestroyWithAssociatedResourceCheckStatusResponse:
        """
        To check on the status of a request to destroy a Droplet with its associated
        resources, send a GET request to the
        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=DestroyWithAssociatedResourceCheckStatusResponse,
        )

    def delete_dangerous(
        self,
        droplet_id: int,
        *,
        x_dangerous: bool,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        To destroy a Droplet along with all of its associated resources, send a DELETE
        request to the
        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint.
        The headers of this request must include an `X-Dangerous` key set to `true`. To
        preview which resources will be destroyed, first query the Droplet's associated
        resources. This operation _cannot_ be reversed and should be used with caution.

        A successful response will include a 202 response code and no content. Use the
        status endpoint to check on the success or failure of the destruction of the
        individual resources.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        # The API expects the X-Dangerous header as a lowercase string literal.
        extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")})
        return self._delete(
            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )

    def delete_selective(
        self,
        droplet_id: int,
        *,
        floating_ips: SequenceNotStr[str] | Omit = omit,
        reserved_ips: SequenceNotStr[str] | Omit = omit,
        snapshots: SequenceNotStr[str] | Omit = omit,
        volume_snapshots: SequenceNotStr[str] | Omit = omit,
        volumes: SequenceNotStr[str] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        To destroy a Droplet along with a sub-set of its associated resources, send a
        DELETE request to the
        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint.
        The JSON body of the request should include `reserved_ips`, `snapshots`,
        `volumes`, or `volume_snapshots` keys each set to an array of IDs for the
        associated resources to be destroyed. The IDs can be found by querying the
        Droplet's associated resources. Any associated resource not included in the
        request will remain and continue to accrue charges on your account.

        A successful response will include a 202 response code and no content. Use the
        status endpoint to check on the success or failure of the destruction of the
        individual resources.

        Args:
          floating_ips: An array of unique identifiers for the floating IPs to be scheduled for
              deletion.

          reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for
              deletion.

          snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion.

          volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for
              deletion.

          volumes: An array of unique identifiers for the volumes to be scheduled for deletion.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._delete(
            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective",
            # Omitted resource lists are stripped from the body by maybe_transform.
            body=maybe_transform(
                {
                    "floating_ips": floating_ips,
                    "reserved_ips": reserved_ips,
                    "snapshots": snapshots,
                    "volume_snapshots": volume_snapshots,
                    "volumes": volumes,
                },
                destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )

    def retry(
        self,
        droplet_id: int,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        If the status of a request to destroy a Droplet with its associated resources
        reported any errors, it can be retried by sending a POST request to the
        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint.

        Only one destroy can be active at a time per Droplet. If a retry is issued while
        another destroy is in progress for the Droplet a 409 status code will be
        returned. A successful response will include a 202 response code and no content.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
+
+
+class AsyncDestroyWithAssociatedResourcesResource(AsyncAPIResource):
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DestroyWithAssociatedResourceListResponse:
+        """
+        To list the associated billable resources that can be destroyed along with a
+        Droplet, send a GET request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint.
+
+        This endpoint will only return resources that you are authorized to see. For
+        example, to see associated Reserved IPs, include the `reserved_ip:read` scope.
+
+        The response will be a JSON object containing `snapshots`, `volumes`, and
+        `volume_snapshots` keys. Each will be set to an array of objects containing
+        information about the associated resources.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DestroyWithAssociatedResourceListResponse,
+        )
+
+    async def check_status(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DestroyWithAssociatedResourceCheckStatusResponse:
+        """
+        To check on the status of a request to destroy a Droplet with its associated
+        resources, send a GET request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DestroyWithAssociatedResourceCheckStatusResponse,
+        )
+
+    async def delete_dangerous(
+        self,
+        droplet_id: int,
+        *,
+        x_dangerous: bool,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To destroy a Droplet along with all of its associated resources, send a DELETE
+        request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint.
+        The headers of this request must include an `X-Dangerous` key set to `true`. To
+        preview which resources will be destroyed, first query the Droplet's associated
+        resources. This operation _cannot_ be reversed and should be used with caution.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+
+        Args:
+          x_dangerous: Sent as the required `X-Dangerous` header; must be `true` for the destroy to
+              proceed.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Endpoint responds 202 with no content; accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        # The boolean safety flag is serialized as a lowercase string header.
+        extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")})
+        return await self._delete(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def delete_selective(
+        self,
+        droplet_id: int,
+        *,
+        floating_ips: SequenceNotStr[str] | Omit = omit,
+        reserved_ips: SequenceNotStr[str] | Omit = omit,
+        snapshots: SequenceNotStr[str] | Omit = omit,
+        volume_snapshots: SequenceNotStr[str] | Omit = omit,
+        volumes: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To destroy a Droplet along with a sub-set of its associated resources, send a
+        DELETE request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint.
+        The JSON body of the request should include `reserved_ips`, `snapshots`,
+        `volumes`, or `volume_snapshots` keys each set to an array of IDs for the
+        associated resources to be destroyed. The IDs can be found by querying the
+        Droplet's associated resources. Any associated resource not included in the
+        request will remain and continue to accrue charges on your account.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+
+        Args:
+          floating_ips: An array of unique identifiers for the floating IPs to be scheduled for
+              deletion.
+
+          reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for
+              deletion.
+
+          snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion.
+
+          volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for
+              deletion.
+
+          volumes: An array of unique identifiers for the volumes to be scheduled for deletion.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Endpoint responds 202 with no content; accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective",
+            # NOTE(review): `omit` sentinels are presumably excluded from the serialized
+            # body by `async_maybe_transform` — confirm against the transform helper.
+            body=await async_maybe_transform(
+                {
+                    "floating_ips": floating_ips,
+                    "reserved_ips": reserved_ips,
+                    "snapshots": snapshots,
+                    "volume_snapshots": volume_snapshots,
+                    "volumes": volumes,
+                },
+                destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def retry(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        If the status of a request to destroy a Droplet with its associated resources
+        reported any errors, it can be retried by sending a POST request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint.
+
+        Only one destroy can be active at a time per Droplet. If a retry is issued while
+        another destroy is in progress for the Droplet a 409 status code will be
+        returned. A successful response will include a 202 response code and no content.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Endpoint responds 202 with no content; accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._post(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class DestroyWithAssociatedResourcesResourceWithRawResponse:
+    """Wraps each method of the sync resource so calls return the raw response
+    object instead of the parsed content."""
+
+    def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None:
+        self._destroy_with_associated_resources = destroy_with_associated_resources
+
+        self.list = to_raw_response_wrapper(
+            destroy_with_associated_resources.list,
+        )
+        self.check_status = to_raw_response_wrapper(
+            destroy_with_associated_resources.check_status,
+        )
+        self.delete_dangerous = to_raw_response_wrapper(
+            destroy_with_associated_resources.delete_dangerous,
+        )
+        self.delete_selective = to_raw_response_wrapper(
+            destroy_with_associated_resources.delete_selective,
+        )
+        self.retry = to_raw_response_wrapper(
+            destroy_with_associated_resources.retry,
+        )
+
+
+class AsyncDestroyWithAssociatedResourcesResourceWithRawResponse:
+    """Wraps each method of the async resource so calls return the raw response
+    object instead of the parsed content."""
+
+    def __init__(self, destroy_with_associated_resources: AsyncDestroyWithAssociatedResourcesResource) -> None:
+        self._destroy_with_associated_resources = destroy_with_associated_resources
+
+        self.list = async_to_raw_response_wrapper(
+            destroy_with_associated_resources.list,
+        )
+        self.check_status = async_to_raw_response_wrapper(
+            destroy_with_associated_resources.check_status,
+        )
+        self.delete_dangerous = async_to_raw_response_wrapper(
+            destroy_with_associated_resources.delete_dangerous,
+        )
+        self.delete_selective = async_to_raw_response_wrapper(
+            destroy_with_associated_resources.delete_selective,
+        )
+        self.retry = async_to_raw_response_wrapper(
+            destroy_with_associated_resources.retry,
+        )
+
+
+class DestroyWithAssociatedResourcesResourceWithStreamingResponse:
+    """Wraps each method of the sync resource so calls return a streaming
+    response that does not eagerly read the response body."""
+
+    def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None:
+        self._destroy_with_associated_resources = destroy_with_associated_resources
+
+        self.list = to_streamed_response_wrapper(
+            destroy_with_associated_resources.list,
+        )
+        self.check_status = to_streamed_response_wrapper(
+            destroy_with_associated_resources.check_status,
+        )
+        self.delete_dangerous = to_streamed_response_wrapper(
+            destroy_with_associated_resources.delete_dangerous,
+        )
+        self.delete_selective = to_streamed_response_wrapper(
+            destroy_with_associated_resources.delete_selective,
+        )
+        self.retry = to_streamed_response_wrapper(
+            destroy_with_associated_resources.retry,
+        )
+
+
+class AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse:
+    """Wraps each method of the async resource so calls return a streaming
+    response that does not eagerly read the response body."""
+
+    def __init__(self, destroy_with_associated_resources: AsyncDestroyWithAssociatedResourcesResource) -> None:
+        self._destroy_with_associated_resources = destroy_with_associated_resources
+
+        self.list = async_to_streamed_response_wrapper(
+            destroy_with_associated_resources.list,
+        )
+        self.check_status = async_to_streamed_response_wrapper(
+            destroy_with_associated_resources.check_status,
+        )
+        self.delete_dangerous = async_to_streamed_response_wrapper(
+            destroy_with_associated_resources.delete_dangerous,
+        )
+        self.delete_selective = async_to_streamed_response_wrapper(
+            destroy_with_associated_resources.delete_selective,
+        )
+        self.retry = async_to_streamed_response_wrapper(
+            destroy_with_associated_resources.retry,
+        )
diff --git a/src/gradient/resources/gpu_droplets/firewalls/__init__.py b/src/gradient/resources/gpu_droplets/firewalls/__init__.py
new file mode 100644
index 00000000..e9cb832f
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/firewalls/__init__.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .tags import (
+ TagsResource,
+ AsyncTagsResource,
+ TagsResourceWithRawResponse,
+ AsyncTagsResourceWithRawResponse,
+ TagsResourceWithStreamingResponse,
+ AsyncTagsResourceWithStreamingResponse,
+)
+from .rules import (
+ RulesResource,
+ AsyncRulesResource,
+ RulesResourceWithRawResponse,
+ AsyncRulesResourceWithRawResponse,
+ RulesResourceWithStreamingResponse,
+ AsyncRulesResourceWithStreamingResponse,
+)
+from .droplets import (
+ DropletsResource,
+ AsyncDropletsResource,
+ DropletsResourceWithRawResponse,
+ AsyncDropletsResourceWithRawResponse,
+ DropletsResourceWithStreamingResponse,
+ AsyncDropletsResourceWithStreamingResponse,
+)
+from .firewalls import (
+ FirewallsResource,
+ AsyncFirewallsResource,
+ FirewallsResourceWithRawResponse,
+ AsyncFirewallsResourceWithRawResponse,
+ FirewallsResourceWithStreamingResponse,
+ AsyncFirewallsResourceWithStreamingResponse,
+)
+
+# Explicit public surface of the `gpu_droplets.firewalls` sub-package.
+__all__ = [
+    "DropletsResource",
+    "AsyncDropletsResource",
+    "DropletsResourceWithRawResponse",
+    "AsyncDropletsResourceWithRawResponse",
+    "DropletsResourceWithStreamingResponse",
+    "AsyncDropletsResourceWithStreamingResponse",
+    "TagsResource",
+    "AsyncTagsResource",
+    "TagsResourceWithRawResponse",
+    "AsyncTagsResourceWithRawResponse",
+    "TagsResourceWithStreamingResponse",
+    "AsyncTagsResourceWithStreamingResponse",
+    "RulesResource",
+    "AsyncRulesResource",
+    "RulesResourceWithRawResponse",
+    "AsyncRulesResourceWithRawResponse",
+    "RulesResourceWithStreamingResponse",
+    "AsyncRulesResourceWithStreamingResponse",
+    "FirewallsResource",
+    "AsyncFirewallsResource",
+    "FirewallsResourceWithRawResponse",
+    "AsyncFirewallsResourceWithRawResponse",
+    "FirewallsResourceWithStreamingResponse",
+    "AsyncFirewallsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/gpu_droplets/firewalls/droplets.py b/src/gradient/resources/gpu_droplets/firewalls/droplets.py
new file mode 100644
index 00000000..b77bf1dc
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/firewalls/droplets.py
@@ -0,0 +1,312 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+
+import httpx
+
+from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.firewalls import droplet_add_params, droplet_remove_params
+
+__all__ = ["DropletsResource", "AsyncDropletsResource"]
+
+
+class DropletsResource(SyncAPIResource):
+    """
+    [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+    provide the ability to restrict network access to and from a Droplet
+    allowing you to define which ports will accept inbound or outbound
+    connections. By sending requests to the `/v2/firewalls` endpoint, you can
+    list, create, or delete firewalls as well as modify access rules.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> DropletsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return DropletsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> DropletsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return DropletsResourceWithStreamingResponse(self)
+
+    def add(
+        self,
+        firewall_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To assign a Droplet to a firewall, send a POST request to
+        `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should
+        be a `droplet_ids` attribute containing a list of Droplet IDs.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Guard against an empty path parameter producing a malformed request URL.
+        if not firewall_id:
+            raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+        # Endpoint responds 204 with no body; accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._post(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/firewalls/{firewall_id}/droplets"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets",
+            body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    def remove(
+        self,
+        firewall_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To remove a Droplet from a firewall, send a DELETE request to
+        `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should
+        be a `droplet_ids` attribute containing a list of Droplet IDs.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Guard against an empty path parameter producing a malformed request URL.
+        if not firewall_id:
+            raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+        # Endpoint responds 204 with no body; accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/firewalls/{firewall_id}/droplets"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets",
+            body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class AsyncDropletsResource(AsyncAPIResource):
+    """
+    [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+    provide the ability to restrict network access to and from a Droplet
+    allowing you to define which ports will accept inbound or outbound
+    connections. By sending requests to the `/v2/firewalls` endpoint, you can
+    list, create, or delete firewalls as well as modify access rules.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncDropletsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncDropletsResourceWithStreamingResponse(self)
+
+    async def add(
+        self,
+        firewall_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To assign a Droplet to a firewall, send a POST request to
+        `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should
+        be a `droplet_ids` attribute containing a list of Droplet IDs.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Guard against an empty path parameter producing a malformed request URL.
+        if not firewall_id:
+            raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+        # Endpoint responds 204 with no body; accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._post(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/firewalls/{firewall_id}/droplets"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets",
+            body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def remove(
+        self,
+        firewall_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To remove a Droplet from a firewall, send a DELETE request to
+        `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should
+        be a `droplet_ids` attribute containing a list of Droplet IDs.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Guard against an empty path parameter producing a malformed request URL.
+        if not firewall_id:
+            raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+        # Endpoint responds 204 with no body; accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            # Use the default DigitalOcean API host unless the client's base_url was overridden.
+            f"/v2/firewalls/{firewall_id}/droplets"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets",
+            body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class DropletsResourceWithRawResponse:
+    """Wraps each method of the sync resource so calls return the raw response
+    object instead of the parsed content."""
+
+    def __init__(self, droplets: DropletsResource) -> None:
+        self._droplets = droplets
+
+        self.add = to_raw_response_wrapper(
+            droplets.add,
+        )
+        self.remove = to_raw_response_wrapper(
+            droplets.remove,
+        )
+
+
+class AsyncDropletsResourceWithRawResponse:
+    """Wraps each method of the async resource so calls return the raw response
+    object instead of the parsed content."""
+
+    def __init__(self, droplets: AsyncDropletsResource) -> None:
+        self._droplets = droplets
+
+        self.add = async_to_raw_response_wrapper(
+            droplets.add,
+        )
+        self.remove = async_to_raw_response_wrapper(
+            droplets.remove,
+        )
+
+
+class DropletsResourceWithStreamingResponse:
+    """Wraps each method of the sync resource so calls return a streaming
+    response that does not eagerly read the response body."""
+
+    def __init__(self, droplets: DropletsResource) -> None:
+        self._droplets = droplets
+
+        self.add = to_streamed_response_wrapper(
+            droplets.add,
+        )
+        self.remove = to_streamed_response_wrapper(
+            droplets.remove,
+        )
+
+
+class AsyncDropletsResourceWithStreamingResponse:
+    """Wraps each method of the async resource so calls return a streaming
+    response that does not eagerly read the response body."""
+
+    def __init__(self, droplets: AsyncDropletsResource) -> None:
+        self._droplets = droplets
+
+        self.add = async_to_streamed_response_wrapper(
+            droplets.add,
+        )
+        self.remove = async_to_streamed_response_wrapper(
+            droplets.remove,
+        )
diff --git a/src/gradient/resources/gpu_droplets/firewalls/firewalls.py b/src/gradient/resources/gpu_droplets/firewalls/firewalls.py
new file mode 100644
index 00000000..4367941c
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/firewalls/firewalls.py
@@ -0,0 +1,789 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .tags import (
+ TagsResource,
+ AsyncTagsResource,
+ TagsResourceWithRawResponse,
+ AsyncTagsResourceWithRawResponse,
+ TagsResourceWithStreamingResponse,
+ AsyncTagsResourceWithStreamingResponse,
+)
+from .rules import (
+ RulesResource,
+ AsyncRulesResource,
+ RulesResourceWithRawResponse,
+ AsyncRulesResourceWithRawResponse,
+ RulesResourceWithStreamingResponse,
+ AsyncRulesResourceWithStreamingResponse,
+)
+from .droplets import (
+ DropletsResource,
+ AsyncDropletsResource,
+ DropletsResourceWithRawResponse,
+ AsyncDropletsResourceWithRawResponse,
+ DropletsResourceWithStreamingResponse,
+ AsyncDropletsResourceWithStreamingResponse,
+)
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets import firewall_list_params, firewall_create_params, firewall_update_params
+from ....types.gpu_droplets.firewall_param import FirewallParam
+from ....types.gpu_droplets.firewall_list_response import FirewallListResponse
+from ....types.gpu_droplets.firewall_create_response import FirewallCreateResponse
+from ....types.gpu_droplets.firewall_update_response import FirewallUpdateResponse
+from ....types.gpu_droplets.firewall_retrieve_response import FirewallRetrieveResponse
+
+__all__ = ["FirewallsResource", "AsyncFirewallsResource"]
+
+
+class FirewallsResource(SyncAPIResource):
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+
+ @cached_property
+ def droplets(self) -> DropletsResource:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return DropletsResource(self._client)
+
+ @cached_property
+ def tags(self) -> TagsResource:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return TagsResource(self._client)
+
+ @cached_property
+ def rules(self) -> RulesResource:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return RulesResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> FirewallsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return FirewallsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FirewallsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return FirewallsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ body: firewall_create_params.Body | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallCreateResponse:
+ """To create a new firewall, send a POST request to `/v2/firewalls`.
+
+ The request
+ must contain at least one inbound or outbound access rule.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls",
+ body=maybe_transform(body, firewall_create_params.FirewallCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FirewallCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ firewall_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallRetrieveResponse:
+ """
+ To show information about an existing firewall, send a GET request to
+ `/v2/firewalls/$FIREWALL_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ return self._get(
+ f"/v2/firewalls/{firewall_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FirewallRetrieveResponse,
+ )
+
+ def update(
+ self,
+ firewall_id: str,
+ *,
+ firewall: FirewallParam,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallUpdateResponse:
+ """
+ To update the configuration of an existing firewall, send a PUT request to
+ `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation
+ of the firewall including existing attributes. **Note that any attributes that
+ are not provided will be reset to their default values.**
+
+ You must have read access (e.g. `droplet:read`) to all resources attached to the
+ firewall to successfully update the firewall.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ return self._put(
+ f"/v2/firewalls/{firewall_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}",
+ body=maybe_transform(firewall, firewall_update_params.FirewallUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FirewallUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallListResponse:
+ """
+ To list all of the firewalls available on your account, send a GET request to
+ `/v2/firewalls`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ firewall_list_params.FirewallListParams,
+ ),
+ ),
+ cast_to=FirewallListResponse,
+ )
+
+ def delete(
+ self,
+ firewall_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+        To delete a firewall, send a DELETE request to `/v2/firewalls/$FIREWALL_ID`.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/firewalls/{firewall_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncFirewallsResource(AsyncAPIResource):
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+
+ @cached_property
+ def droplets(self) -> AsyncDropletsResource:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncDropletsResource(self._client)
+
+ @cached_property
+ def tags(self) -> AsyncTagsResource:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncTagsResource(self._client)
+
+ @cached_property
+ def rules(self) -> AsyncRulesResource:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncRulesResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncFirewallsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFirewallsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFirewallsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncFirewallsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ body: firewall_create_params.Body | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallCreateResponse:
+ """To create a new firewall, send a POST request to `/v2/firewalls`.
+
+ The request
+ must contain at least one inbound or outbound access rule.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls",
+ body=await async_maybe_transform(body, firewall_create_params.FirewallCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FirewallCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ firewall_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallRetrieveResponse:
+ """
+ To show information about an existing firewall, send a GET request to
+ `/v2/firewalls/$FIREWALL_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ return await self._get(
+ f"/v2/firewalls/{firewall_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FirewallRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ firewall_id: str,
+ *,
+ firewall: FirewallParam,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallUpdateResponse:
+ """
+ To update the configuration of an existing firewall, send a PUT request to
+ `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation
+ of the firewall including existing attributes. **Note that any attributes that
+ are not provided will be reset to their default values.**
+
+ You must have read access (e.g. `droplet:read`) to all resources attached to the
+ firewall to successfully update the firewall.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ return await self._put(
+ f"/v2/firewalls/{firewall_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}",
+ body=await async_maybe_transform(firewall, firewall_update_params.FirewallUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FirewallUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FirewallListResponse:
+ """
+ To list all of the firewalls available on your account, send a GET request to
+ `/v2/firewalls`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ firewall_list_params.FirewallListParams,
+ ),
+ ),
+ cast_to=FirewallListResponse,
+ )
+
+ async def delete(
+ self,
+ firewall_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+        To delete a firewall, send a DELETE request to `/v2/firewalls/$FIREWALL_ID`.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/firewalls/{firewall_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class FirewallsResourceWithRawResponse:
+ def __init__(self, firewalls: FirewallsResource) -> None:
+ self._firewalls = firewalls
+
+ self.create = to_raw_response_wrapper(
+ firewalls.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ firewalls.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ firewalls.update,
+ )
+ self.list = to_raw_response_wrapper(
+ firewalls.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ firewalls.delete,
+ )
+
+ @cached_property
+ def droplets(self) -> DropletsResourceWithRawResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return DropletsResourceWithRawResponse(self._firewalls.droplets)
+
+ @cached_property
+ def tags(self) -> TagsResourceWithRawResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return TagsResourceWithRawResponse(self._firewalls.tags)
+
+ @cached_property
+ def rules(self) -> RulesResourceWithRawResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return RulesResourceWithRawResponse(self._firewalls.rules)
+
+
+class AsyncFirewallsResourceWithRawResponse:
+ def __init__(self, firewalls: AsyncFirewallsResource) -> None:
+ self._firewalls = firewalls
+
+ self.create = async_to_raw_response_wrapper(
+ firewalls.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ firewalls.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ firewalls.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ firewalls.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ firewalls.delete,
+ )
+
+ @cached_property
+ def droplets(self) -> AsyncDropletsResourceWithRawResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncDropletsResourceWithRawResponse(self._firewalls.droplets)
+
+ @cached_property
+ def tags(self) -> AsyncTagsResourceWithRawResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncTagsResourceWithRawResponse(self._firewalls.tags)
+
+ @cached_property
+ def rules(self) -> AsyncRulesResourceWithRawResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncRulesResourceWithRawResponse(self._firewalls.rules)
+
+
+class FirewallsResourceWithStreamingResponse:
+ def __init__(self, firewalls: FirewallsResource) -> None:
+ self._firewalls = firewalls
+
+ self.create = to_streamed_response_wrapper(
+ firewalls.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ firewalls.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ firewalls.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ firewalls.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ firewalls.delete,
+ )
+
+ @cached_property
+ def droplets(self) -> DropletsResourceWithStreamingResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return DropletsResourceWithStreamingResponse(self._firewalls.droplets)
+
+ @cached_property
+ def tags(self) -> TagsResourceWithStreamingResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return TagsResourceWithStreamingResponse(self._firewalls.tags)
+
+ @cached_property
+ def rules(self) -> RulesResourceWithStreamingResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return RulesResourceWithStreamingResponse(self._firewalls.rules)
+
+
+class AsyncFirewallsResourceWithStreamingResponse:
+ def __init__(self, firewalls: AsyncFirewallsResource) -> None:
+ self._firewalls = firewalls
+
+ self.create = async_to_streamed_response_wrapper(
+ firewalls.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ firewalls.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ firewalls.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ firewalls.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ firewalls.delete,
+ )
+
+ @cached_property
+ def droplets(self) -> AsyncDropletsResourceWithStreamingResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncDropletsResourceWithStreamingResponse(self._firewalls.droplets)
+
+ @cached_property
+ def tags(self) -> AsyncTagsResourceWithStreamingResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncTagsResourceWithStreamingResponse(self._firewalls.tags)
+
+ @cached_property
+ def rules(self) -> AsyncRulesResourceWithStreamingResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncRulesResourceWithStreamingResponse(self._firewalls.rules)
diff --git a/src/gradient/resources/gpu_droplets/firewalls/rules.py b/src/gradient/resources/gpu_droplets/firewalls/rules.py
new file mode 100644
index 00000000..bea27fe4
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/firewalls/rules.py
@@ -0,0 +1,336 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.firewalls import rule_add_params, rule_remove_params
+
+__all__ = ["RulesResource", "AsyncRulesResource"]
+
+
+class RulesResource(SyncAPIResource):
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> RulesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return RulesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RulesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return RulesResourceWithStreamingResponse(self)
+
+ def add(
+ self,
+ firewall_id: str,
+ *,
+ inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | Omit = omit,
+ outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To add additional access rules to a firewall, send a POST request to
+ `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an
+        `inbound_rules` and/or `outbound_rules` attribute containing an array of rules to be
+ added.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._post(
+ f"/v2/firewalls/{firewall_id}/rules"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules",
+ body=maybe_transform(
+ {
+ "inbound_rules": inbound_rules,
+ "outbound_rules": outbound_rules,
+ },
+ rule_add_params.RuleAddParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def remove(
+ self,
+ firewall_id: str,
+ *,
+ inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | Omit = omit,
+ outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To remove access rules from a firewall, send a DELETE request to
+ `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an
+ `inbound_rules` and/or `outbound_rules` attribute containing an array of rules
+ to be removed.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/firewalls/{firewall_id}/rules"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules",
+ body=maybe_transform(
+ {
+ "inbound_rules": inbound_rules,
+ "outbound_rules": outbound_rules,
+ },
+ rule_remove_params.RuleRemoveParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncRulesResource(AsyncAPIResource):
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncRulesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRulesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRulesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncRulesResourceWithStreamingResponse(self)
+
+ async def add(
+ self,
+ firewall_id: str,
+ *,
+ inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | Omit = omit,
+ outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To add additional access rules to a firewall, send a POST request to
+ `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an
+        `inbound_rules` and/or `outbound_rules` attribute containing an array of rules to be
+ added.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._post(
+ f"/v2/firewalls/{firewall_id}/rules"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules",
+ body=await async_maybe_transform(
+ {
+ "inbound_rules": inbound_rules,
+ "outbound_rules": outbound_rules,
+ },
+ rule_add_params.RuleAddParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ async def remove(
+ self,
+ firewall_id: str,
+ *,
+ inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | Omit = omit,
+ outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To remove access rules from a firewall, send a DELETE request to
+ `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an
+ `inbound_rules` and/or `outbound_rules` attribute containing an array of rules
+ to be removed.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/firewalls/{firewall_id}/rules"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules",
+ body=await async_maybe_transform(
+ {
+ "inbound_rules": inbound_rules,
+ "outbound_rules": outbound_rules,
+ },
+ rule_remove_params.RuleRemoveParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class RulesResourceWithRawResponse:
+ def __init__(self, rules: RulesResource) -> None:
+ self._rules = rules
+
+ self.add = to_raw_response_wrapper(
+ rules.add,
+ )
+ self.remove = to_raw_response_wrapper(
+ rules.remove,
+ )
+
+
+class AsyncRulesResourceWithRawResponse:
+ def __init__(self, rules: AsyncRulesResource) -> None:
+ self._rules = rules
+
+ self.add = async_to_raw_response_wrapper(
+ rules.add,
+ )
+ self.remove = async_to_raw_response_wrapper(
+ rules.remove,
+ )
+
+
+class RulesResourceWithStreamingResponse:
+ def __init__(self, rules: RulesResource) -> None:
+ self._rules = rules
+
+ self.add = to_streamed_response_wrapper(
+ rules.add,
+ )
+ self.remove = to_streamed_response_wrapper(
+ rules.remove,
+ )
+
+
+class AsyncRulesResourceWithStreamingResponse:
+ def __init__(self, rules: AsyncRulesResource) -> None:
+ self._rules = rules
+
+ self.add = async_to_streamed_response_wrapper(
+ rules.add,
+ )
+ self.remove = async_to_streamed_response_wrapper(
+ rules.remove,
+ )
diff --git a/src/gradient/resources/gpu_droplets/firewalls/tags.py b/src/gradient/resources/gpu_droplets/firewalls/tags.py
new file mode 100644
index 00000000..c38788be
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/firewalls/tags.py
@@ -0,0 +1,324 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import httpx
+
+from ...._types import Body, Query, Headers, NoneType, NotGiven, SequenceNotStr, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.firewalls import tag_add_params, tag_remove_params
+
+__all__ = ["TagsResource", "AsyncTagsResource"]
+
+
+class TagsResource(SyncAPIResource):
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> TagsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return TagsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> TagsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return TagsResourceWithStreamingResponse(self)
+
+ def add(
+ self,
+ firewall_id: str,
+ *,
+ tags: Optional[SequenceNotStr[str]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To assign a tag representing a group of Droplets to a firewall, send a POST
+ request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there
+ should be a `tags` attribute containing a list of tag names.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._post(
+ f"/v2/firewalls/{firewall_id}/tags"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags",
+ body=maybe_transform({"tags": tags}, tag_add_params.TagAddParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def remove(
+ self,
+ firewall_id: str,
+ *,
+ tags: Optional[SequenceNotStr[str]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To remove a tag representing a group of Droplets from a firewall, send a DELETE
+ request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there
+ should be a `tags` attribute containing a list of tag names.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/firewalls/{firewall_id}/tags"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags",
+ body=maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncTagsResource(AsyncAPIResource):
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncTagsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncTagsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncTagsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncTagsResourceWithStreamingResponse(self)
+
+ async def add(
+ self,
+ firewall_id: str,
+ *,
+ tags: Optional[SequenceNotStr[str]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To assign a tag representing a group of Droplets to a firewall, send a POST
+ request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there
+ should be a `tags` attribute containing a list of tag names.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._post(
+ f"/v2/firewalls/{firewall_id}/tags"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags",
+ body=await async_maybe_transform({"tags": tags}, tag_add_params.TagAddParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ async def remove(
+ self,
+ firewall_id: str,
+ *,
+ tags: Optional[SequenceNotStr[str]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To remove a tag representing a group of Droplets from a firewall, send a DELETE
+ request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there
+ should be a `tags` attribute containing a list of tag names.
+
+ No response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not firewall_id:
+ raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/firewalls/{firewall_id}/tags"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags",
+ body=await async_maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class TagsResourceWithRawResponse:
+ def __init__(self, tags: TagsResource) -> None:
+ self._tags = tags
+
+ self.add = to_raw_response_wrapper(
+ tags.add,
+ )
+ self.remove = to_raw_response_wrapper(
+ tags.remove,
+ )
+
+
+class AsyncTagsResourceWithRawResponse:
+ def __init__(self, tags: AsyncTagsResource) -> None:
+ self._tags = tags
+
+ self.add = async_to_raw_response_wrapper(
+ tags.add,
+ )
+ self.remove = async_to_raw_response_wrapper(
+ tags.remove,
+ )
+
+
+class TagsResourceWithStreamingResponse:
+ def __init__(self, tags: TagsResource) -> None:
+ self._tags = tags
+
+ self.add = to_streamed_response_wrapper(
+ tags.add,
+ )
+ self.remove = to_streamed_response_wrapper(
+ tags.remove,
+ )
+
+
+class AsyncTagsResourceWithStreamingResponse:
+ def __init__(self, tags: AsyncTagsResource) -> None:
+ self._tags = tags
+
+ self.add = async_to_streamed_response_wrapper(
+ tags.add,
+ )
+ self.remove = async_to_streamed_response_wrapper(
+ tags.remove,
+ )
diff --git a/src/gradient/resources/gpu_droplets/floating_ips/__init__.py b/src/gradient/resources/gpu_droplets/floating_ips/__init__.py
new file mode 100644
index 00000000..bf6871b1
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/floating_ips/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+from .floating_ips import (
+ FloatingIPsResource,
+ AsyncFloatingIPsResource,
+ FloatingIPsResourceWithRawResponse,
+ AsyncFloatingIPsResourceWithRawResponse,
+ FloatingIPsResourceWithStreamingResponse,
+ AsyncFloatingIPsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "ActionsResource",
+ "AsyncActionsResource",
+ "ActionsResourceWithRawResponse",
+ "AsyncActionsResourceWithRawResponse",
+ "ActionsResourceWithStreamingResponse",
+ "AsyncActionsResourceWithStreamingResponse",
+ "FloatingIPsResource",
+ "AsyncFloatingIPsResource",
+ "FloatingIPsResourceWithRawResponse",
+ "AsyncFloatingIPsResourceWithRawResponse",
+ "FloatingIPsResourceWithStreamingResponse",
+ "AsyncFloatingIPsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/gpu_droplets/floating_ips/actions.py b/src/gradient/resources/gpu_droplets/floating_ips/actions.py
new file mode 100644
index 00000000..83e0b918
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/floating_ips/actions.py
@@ -0,0 +1,533 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.floating_ips import action_create_params
+from ....types.gpu_droplets.floating_ips.action_list_response import ActionListResponse
+from ....types.gpu_droplets.floating_ips.action_create_response import ActionCreateResponse
+from ....types.gpu_droplets.floating_ips.action_retrieve_response import ActionRetrieveResponse
+
+__all__ = ["ActionsResource", "AsyncActionsResource"]
+
+
+class ActionsResource(SyncAPIResource):
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> ActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ActionsResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ floating_ip: str,
+ *,
+ type: Literal["assign", "unassign"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionCreateResponse:
+ """
+ To initiate an action on a floating IP send a POST request to
+ `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set
+ the `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ---------- | ------------------------------------- |
+ | `assign` | Assigns a floating IP to a Droplet |
+ | `unassign` | Unassign a floating IP from a Droplet |
+
+ Args:
+ type: The type of action to initiate for the floating IP.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ floating_ip: str,
+ *,
+ droplet_id: int,
+ type: Literal["assign", "unassign"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionCreateResponse:
+ """
+ To initiate an action on a floating IP send a POST request to
+ `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set
+ the `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ---------- | ------------------------------------- |
+ | `assign` | Assigns a floating IP to a Droplet |
+ | `unassign` | Unassign a floating IP from a Droplet |
+
+ Args:
+ droplet_id: The ID of the Droplet that the floating IP will be assigned to.
+
+ type: The type of action to initiate for the floating IP.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"], ["droplet_id", "type"])
+ def create(
+ self,
+ floating_ip: str,
+ *,
+ type: Literal["assign", "unassign"],
+ droplet_id: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionCreateResponse:
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return self._post(
+ f"/v2/floating_ips/{floating_ip}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions",
+ body=maybe_transform(
+ {
+ "type": type,
+ "droplet_id": droplet_id,
+ },
+ action_create_params.ActionCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ action_id: int,
+ *,
+ floating_ip: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionRetrieveResponse:
+ """
+ To retrieve the status of a floating IP action, send a GET request to
+ `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return self._get(
+ f"/v2/floating_ips/{floating_ip}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionRetrieveResponse,
+ )
+
+ def list(
+ self,
+ floating_ip: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve all actions that have been executed on a floating IP, send a GET
+ request to `/v2/floating_ips/$FLOATING_IP/actions`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return self._get(
+ f"/v2/floating_ips/{floating_ip}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionListResponse,
+ )
+
+
+class AsyncActionsResource(AsyncAPIResource):
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncActionsResourceWithStreamingResponse(self)
+
+ @overload
+ async def create(
+ self,
+ floating_ip: str,
+ *,
+ type: Literal["assign", "unassign"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionCreateResponse:
+ """
+ To initiate an action on a floating IP send a POST request to
+ `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set
+ the `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ---------- | ------------------------------------- |
+ | `assign` | Assigns a floating IP to a Droplet |
+ | `unassign` | Unassign a floating IP from a Droplet |
+
+ Args:
+ type: The type of action to initiate for the floating IP.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ floating_ip: str,
+ *,
+ droplet_id: int,
+ type: Literal["assign", "unassign"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionCreateResponse:
+ """
+ To initiate an action on a floating IP send a POST request to
+ `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set
+ the `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ---------- | ------------------------------------- |
+ | `assign` | Assigns a floating IP to a Droplet |
+ | `unassign` | Unassign a floating IP from a Droplet |
+
+ Args:
+ droplet_id: The ID of the Droplet that the floating IP will be assigned to.
+
+ type: The type of action to initiate for the floating IP.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"], ["droplet_id", "type"])
+ async def create(
+ self,
+ floating_ip: str,
+ *,
+ type: Literal["assign", "unassign"],
+ droplet_id: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionCreateResponse:
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return await self._post(
+ f"/v2/floating_ips/{floating_ip}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions",
+ body=await async_maybe_transform(
+ {
+ "type": type,
+ "droplet_id": droplet_id,
+ },
+ action_create_params.ActionCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ action_id: int,
+ *,
+ floating_ip: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionRetrieveResponse:
+ """
+ To retrieve the status of a floating IP action, send a GET request to
+ `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return await self._get(
+ f"/v2/floating_ips/{floating_ip}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ floating_ip: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve all actions that have been executed on a floating IP, send a GET
+ request to `/v2/floating_ips/$FLOATING_IP/actions`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return await self._get(
+ f"/v2/floating_ips/{floating_ip}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionListResponse,
+ )
+
+
+class ActionsResourceWithRawResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.create = to_raw_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ actions.list,
+ )
+
+
+class AsyncActionsResourceWithRawResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.create = async_to_raw_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ actions.list,
+ )
+
+
+class ActionsResourceWithStreamingResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.create = to_streamed_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ actions.list,
+ )
+
+
+class AsyncActionsResourceWithStreamingResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.create = async_to_streamed_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ actions.list,
+ )
diff --git a/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py
new file mode 100644
index 00000000..0e65e6aa
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py
@@ -0,0 +1,793 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import overload
+
+import httpx
+
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets import floating_ip_list_params, floating_ip_create_params
+from ....types.gpu_droplets.floating_ip_list_response import FloatingIPListResponse
+from ....types.gpu_droplets.floating_ip_create_response import FloatingIPCreateResponse
+from ....types.gpu_droplets.floating_ip_retrieve_response import FloatingIPRetrieveResponse
+
+__all__ = ["FloatingIPsResource", "AsyncFloatingIPsResource"]
+
+
+class FloatingIPsResource(SyncAPIResource):
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+ are publicly-accessible static IP addresses that can be mapped to one of
+ your Droplets. They can be used to create highly available setups or other
+ configurations requiring movable addresses.
+
+ Floating IPs are bound to a specific region.
+ """
+
+ @cached_property
+ def actions(self) -> ActionsResource:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return ActionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> FloatingIPsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return FloatingIPsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FloatingIPsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return FloatingIPsResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ *,
+ droplet_id: int,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPCreateResponse:
+ """
+ On creation, a floating IP must be either assigned to a Droplet or reserved to a
+ region.
+
+ - To create a new floating IP assigned to a Droplet, send a POST request to
+ `/v2/floating_ips` with the `droplet_id` attribute.
+
+ - To create a new floating IP reserved to a region, send a POST request to
+ `/v2/floating_ips` with the `region` attribute.
+
+ Args:
+ droplet_id: The ID of the Droplet that the floating IP will be assigned to.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ region: str,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPCreateResponse:
+ """
+ On creation, a floating IP must be either assigned to a Droplet or reserved to a
+ region.
+
+ - To create a new floating IP assigned to a Droplet, send a POST request to
+ `/v2/floating_ips` with the `droplet_id` attribute.
+
+ - To create a new floating IP reserved to a region, send a POST request to
+ `/v2/floating_ips` with the `region` attribute.
+
+ Args:
+ region: The slug identifier for the region the floating IP will be reserved to.
+
+ project_id: The UUID of the project to which the floating IP will be assigned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["droplet_id"], ["region"])
+ def create(
+ self,
+ *,
+ droplet_id: int | Omit = omit,
+ region: str | Omit = omit,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPCreateResponse:
+ return self._post(
+ "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips",
+ body=maybe_transform(
+ {
+ "droplet_id": droplet_id,
+ "region": region,
+ "project_id": project_id,
+ },
+ floating_ip_create_params.FloatingIPCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FloatingIPCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ floating_ip: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPRetrieveResponse:
+ """
+ To show information about a floating IP, send a GET request to
+ `/v2/floating_ips/$FLOATING_IP_ADDR`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return self._get(
+ f"/v2/floating_ips/{floating_ip}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FloatingIPRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPListResponse:
+ """
+ To list all of the floating IPs available on your account, send a GET request to
+ `/v2/floating_ips`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ floating_ip_list_params.FloatingIPListParams,
+ ),
+ ),
+ cast_to=FloatingIPListResponse,
+ )
+
+ def delete(
+ self,
+ floating_ip: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a floating IP and remove it from your account, send a DELETE request
+ to `/v2/floating_ips/$FLOATING_IP_ADDR`.
+
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/floating_ips/{floating_ip}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncFloatingIPsResource(AsyncAPIResource):
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+ are publicly-accessible static IP addresses that can be mapped to one of
+ your Droplets. They can be used to create highly available setups or other
+ configurations requiring movable addresses.
+
+ Floating IPs are bound to a specific region.
+ """
+
+ @cached_property
+ def actions(self) -> AsyncActionsResource:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return AsyncActionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncFloatingIPsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFloatingIPsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFloatingIPsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncFloatingIPsResourceWithStreamingResponse(self)
+
+ @overload
+ async def create(
+ self,
+ *,
+ droplet_id: int,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPCreateResponse:
+ """
+ On creation, a floating IP must be either assigned to a Droplet or reserved to a
+ region.
+
+ - To create a new floating IP assigned to a Droplet, send a POST request to
+ `/v2/floating_ips` with the `droplet_id` attribute.
+
+ - To create a new floating IP reserved to a region, send a POST request to
+ `/v2/floating_ips` with the `region` attribute.
+
+ Args:
+ droplet_id: The ID of the Droplet that the floating IP will be assigned to.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ region: str,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPCreateResponse:
+ """
+ On creation, a floating IP must be either assigned to a Droplet or reserved to a
+ region.
+
+ - To create a new floating IP assigned to a Droplet, send a POST request to
+ `/v2/floating_ips` with the `droplet_id` attribute.
+
+ - To create a new floating IP reserved to a region, send a POST request to
+ `/v2/floating_ips` with the `region` attribute.
+
+ Args:
+ region: The slug identifier for the region the floating IP will be reserved to.
+
+ project_id: The UUID of the project to which the floating IP will be assigned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["droplet_id"], ["region"])
+ async def create(
+ self,
+ *,
+ droplet_id: int | Omit = omit,
+ region: str | Omit = omit,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPCreateResponse:
+ return await self._post(
+ "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips",
+ body=await async_maybe_transform(
+ {
+ "droplet_id": droplet_id,
+ "region": region,
+ "project_id": project_id,
+ },
+ floating_ip_create_params.FloatingIPCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FloatingIPCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ floating_ip: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPRetrieveResponse:
+ """
+ To show information about a floating IP, send a GET request to
+ `/v2/floating_ips/$FLOATING_IP_ADDR`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ return await self._get(
+ f"/v2/floating_ips/{floating_ip}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FloatingIPRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FloatingIPListResponse:
+ """
+ To list all of the floating IPs available on your account, send a GET request to
+ `/v2/floating_ips`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ floating_ip_list_params.FloatingIPListParams,
+ ),
+ ),
+ cast_to=FloatingIPListResponse,
+ )
+
+ async def delete(
+ self,
+ floating_ip: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a floating IP and remove it from your account, send a DELETE request
+ to `/v2/floating_ips/$FLOATING_IP_ADDR`.
+
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not floating_ip:
+ raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/floating_ips/{floating_ip}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class FloatingIPsResourceWithRawResponse:
+ def __init__(self, floating_ips: FloatingIPsResource) -> None:
+ self._floating_ips = floating_ips
+
+ self.create = to_raw_response_wrapper(
+ floating_ips.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ floating_ips.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ floating_ips.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ floating_ips.delete,
+ )
+
+ @cached_property
+ def actions(self) -> ActionsResourceWithRawResponse:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return ActionsResourceWithRawResponse(self._floating_ips.actions)
+
+
+class AsyncFloatingIPsResourceWithRawResponse:
+ def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None:
+ self._floating_ips = floating_ips
+
+ self.create = async_to_raw_response_wrapper(
+ floating_ips.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ floating_ips.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ floating_ips.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ floating_ips.delete,
+ )
+
+ @cached_property
+ def actions(self) -> AsyncActionsResourceWithRawResponse:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return AsyncActionsResourceWithRawResponse(self._floating_ips.actions)
+
+
+class FloatingIPsResourceWithStreamingResponse:
+ def __init__(self, floating_ips: FloatingIPsResource) -> None:
+ self._floating_ips = floating_ips
+
+ self.create = to_streamed_response_wrapper(
+ floating_ips.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ floating_ips.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ floating_ips.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ floating_ips.delete,
+ )
+
+ @cached_property
+ def actions(self) -> ActionsResourceWithStreamingResponse:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return ActionsResourceWithStreamingResponse(self._floating_ips.actions)
+
+
+class AsyncFloatingIPsResourceWithStreamingResponse:
+ def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None:
+ self._floating_ips = floating_ips
+
+ self.create = async_to_streamed_response_wrapper(
+ floating_ips.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ floating_ips.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ floating_ips.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ floating_ips.delete,
+ )
+
+ @cached_property
+ def actions(self) -> AsyncActionsResourceWithStreamingResponse:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ Floating IP actions are commands that can be given to a DigitalOcean
+ floating IP. These requests are made on the actions endpoint of a specific
+ floating IP.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return AsyncActionsResourceWithStreamingResponse(self._floating_ips.actions)
diff --git a/src/gradient/resources/gpu_droplets/gpu_droplets.py b/src/gradient/resources/gpu_droplets/gpu_droplets.py
new file mode 100644
index 00000000..5c0b1274
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/gpu_droplets.py
@@ -0,0 +1,2858 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, Union, Optional, cast
+from typing_extensions import Literal, overload
+
+import httpx
+
+from .sizes import (
+ SizesResource,
+ AsyncSizesResource,
+ SizesResourceWithRawResponse,
+ AsyncSizesResourceWithRawResponse,
+ SizesResourceWithStreamingResponse,
+ AsyncSizesResourceWithStreamingResponse,
+)
+from ...types import (
+ gpu_droplet_list_params,
+ gpu_droplet_create_params,
+ gpu_droplet_list_kernels_params,
+ gpu_droplet_delete_by_tag_params,
+ gpu_droplet_list_firewalls_params,
+ gpu_droplet_list_snapshots_params,
+)
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+from .backups import (
+ BackupsResource,
+ AsyncBackupsResource,
+ BackupsResourceWithRawResponse,
+ AsyncBackupsResourceWithRawResponse,
+ BackupsResourceWithStreamingResponse,
+ AsyncBackupsResourceWithStreamingResponse,
+)
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from .autoscale import (
+ AutoscaleResource,
+ AsyncAutoscaleResource,
+ AutoscaleResourceWithRawResponse,
+ AsyncAutoscaleResourceWithRawResponse,
+ AutoscaleResourceWithStreamingResponse,
+ AsyncAutoscaleResourceWithStreamingResponse,
+)
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .images.images import (
+ ImagesResource,
+ AsyncImagesResource,
+ ImagesResourceWithRawResponse,
+ AsyncImagesResourceWithRawResponse,
+ ImagesResourceWithStreamingResponse,
+ AsyncImagesResourceWithStreamingResponse,
+)
+from ..._base_client import make_request_options
+from .account.account import (
+ AccountResource,
+ AsyncAccountResource,
+ AccountResourceWithRawResponse,
+ AsyncAccountResourceWithRawResponse,
+ AccountResourceWithStreamingResponse,
+ AsyncAccountResourceWithStreamingResponse,
+)
+from .volumes.volumes import (
+ VolumesResource,
+ AsyncVolumesResource,
+ VolumesResourceWithRawResponse,
+ AsyncVolumesResourceWithRawResponse,
+ VolumesResourceWithStreamingResponse,
+ AsyncVolumesResourceWithStreamingResponse,
+)
+from .firewalls.firewalls import (
+ FirewallsResource,
+ AsyncFirewallsResource,
+ FirewallsResourceWithRawResponse,
+ AsyncFirewallsResourceWithRawResponse,
+ FirewallsResourceWithStreamingResponse,
+ AsyncFirewallsResourceWithStreamingResponse,
+)
+from .floating_ips.floating_ips import (
+ FloatingIPsResource,
+ AsyncFloatingIPsResource,
+ FloatingIPsResourceWithRawResponse,
+ AsyncFloatingIPsResourceWithRawResponse,
+ FloatingIPsResourceWithStreamingResponse,
+ AsyncFloatingIPsResourceWithStreamingResponse,
+)
+from .load_balancers.load_balancers import (
+ LoadBalancersResource,
+ AsyncLoadBalancersResource,
+ LoadBalancersResourceWithRawResponse,
+ AsyncLoadBalancersResourceWithRawResponse,
+ LoadBalancersResourceWithStreamingResponse,
+ AsyncLoadBalancersResourceWithStreamingResponse,
+)
+from ...types.gpu_droplet_list_response import GPUDropletListResponse
+from .destroy_with_associated_resources import (
+ DestroyWithAssociatedResourcesResource,
+ AsyncDestroyWithAssociatedResourcesResource,
+ DestroyWithAssociatedResourcesResourceWithRawResponse,
+ AsyncDestroyWithAssociatedResourcesResourceWithRawResponse,
+ DestroyWithAssociatedResourcesResourceWithStreamingResponse,
+ AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse,
+)
+from ...types.droplet_backup_policy_param import DropletBackupPolicyParam
+from ...types.gpu_droplet_create_response import GPUDropletCreateResponse
+from ...types.gpu_droplet_retrieve_response import GPUDropletRetrieveResponse
+from ...types.gpu_droplet_list_kernels_response import GPUDropletListKernelsResponse
+from ...types.gpu_droplet_list_firewalls_response import GPUDropletListFirewallsResponse
+from ...types.gpu_droplet_list_neighbors_response import GPUDropletListNeighborsResponse
+from ...types.gpu_droplet_list_snapshots_response import GPUDropletListSnapshotsResponse
+
+__all__ = ["GPUDropletsResource", "AsyncGPUDropletsResource"]
+
+
+class GPUDropletsResource(SyncAPIResource):
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+
+ @cached_property
+ def backups(self) -> BackupsResource:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ return BackupsResource(self._client)
+
+ @cached_property
+ def actions(self) -> ActionsResource:
+ """Droplet actions are tasks that can be executed on a Droplet.
+
+ These can be
+ things like rebooting, resizing, snapshotting, etc.
+
+ Droplet action requests are generally targeted at one of the "actions"
+ endpoints for a specific Droplet. The specific actions are usually
+ initiated by sending a POST request with the action and arguments as
+ parameters.
+
+ Droplet action requests create a Droplet actions object, which can be used
+ to get information about the status of an action. Creating a Droplet
+ action is asynchronous: the HTTP call will return the action object before
+ the action has finished processing on the Droplet. The current status of
+ an action can be retrieved from either the Droplet actions endpoint or the
+ global actions endpoint. If a Droplet action is uncompleted it may block
+ the creation of a subsequent action for that Droplet, the locked attribute
+ of the Droplet will be true and attempts to create a Droplet action will
+ fail with a status of 422.
+ """
+ return ActionsResource(self._client)
+
+ @cached_property
+ def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResource:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ return DestroyWithAssociatedResourcesResource(self._client)
+
+ @cached_property
+ def autoscale(self) -> AutoscaleResource:
+ """
+ Droplet autoscale pools manage automatic horizontal scaling for your applications based on resource usage (CPU, memory, or both) or a static configuration.
+ """
+ return AutoscaleResource(self._client)
+
+ @cached_property
+ def firewalls(self) -> FirewallsResource:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return FirewallsResource(self._client)
+
+ @cached_property
+ def floating_ips(self) -> FloatingIPsResource:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+ are publicly-accessible static IP addresses that can be mapped to one of
+ your Droplets. They can be used to create highly available setups or other
+ configurations requiring movable addresses.
+
+ Floating IPs are bound to a specific region.
+ """
+ return FloatingIPsResource(self._client)
+
+ @cached_property
+ def images(self) -> ImagesResource:
+ """
+ A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+ used to create a Droplet and may come in a number of flavors. Currently,
+ there are five types of images: snapshots, backups, applications,
+ distributions, and custom images.
+
+ * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+ a full copy of an existing Droplet instance taken on demand.
+
+ * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+ to snapshots but are created automatically at regular intervals when
+ enabled for a Droplet.
+
+ * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+ are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+ formats are supported) that you may upload for use on DigitalOcean.
+
+ * Distributions are the public Linux distributions that are available to
+ be used as a base to create Droplets.
+
+ * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+ are distributions pre-configured with additional software.
+
+ To interact with images, you will generally send requests to the images
+ endpoint at /v2/images.
+ """
+ return ImagesResource(self._client)
+
+ @cached_property
+ def load_balancers(self) -> LoadBalancersResource:
+ """
+ [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+ provide a way to distribute traffic across multiple Droplets. By sending
+ requests to the `/v2/load_balancers` endpoint, you can list, create, or
+ delete load balancers as well as add or remove Droplets, forwarding rules,
+ and other configuration details.
+ """
+ return LoadBalancersResource(self._client)
+
+ @cached_property
+ def sizes(self) -> SizesResource:
+ """
+ The sizes objects represent different packages of hardware resources that
+ can be used for Droplets. When a Droplet is created, a size must be
+ selected so that the correct resources can be allocated.
+
+ Each size represents a plan that bundles together specific sets of
+ resources. This includes the amount of RAM, the number of virtual CPUs,
+ disk space, and transfer. The size object also includes the pricing
+ details and the regions that the size is available in.
+ """
+ return SizesResource(self._client)
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResource:
+ """
+ [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
+ instances of a Droplet or a block storage volume, which is reflected in
+ the `resource_type` attribute. In order to avoid problems with compressing
+ filesystems, each defines a `min_disk_size` attribute which is the minimum
+ size of the Droplet or volume disk when creating a new resource from the
+ saved snapshot.
+
+ To interact with snapshots, you will generally send requests to the
+ snapshots endpoint at `/v2/snapshots`.
+ """
+ return SnapshotsResource(self._client)
+
+ @cached_property
+ def volumes(self) -> VolumesResource:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+ return VolumesResource(self._client)
+
+ @cached_property
+ def account(self) -> AccountResource:
+ return AccountResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> GPUDropletsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return GPUDropletsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> GPUDropletsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return GPUDropletsResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ *,
+ image: Union[str, int],
+ name: str,
+ size: str,
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ backups: bool | Omit = omit,
+ ipv6: bool | Omit = omit,
+ monitoring: bool | Omit = omit,
+ private_networking: bool | Omit = omit,
+ region: str | Omit = omit,
+ ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ user_data: str | Omit = omit,
+ volumes: SequenceNotStr[str] | Omit = omit,
+ vpc_uuid: str | Omit = omit,
+ with_droplet_agent: bool | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletCreateResponse:
+ """
+ To create a new Droplet, send a POST request to `/v2/droplets` setting the
+ required attributes.
+
+ A Droplet will be created using the provided information. The response body will
+ contain a JSON object with a key called `droplet`. The value will be an object
+ containing the standard attributes for your new Droplet. The response code, 202
+ Accepted, does not indicate the success or failure of the operation, just that
+ the request has been accepted for processing. The `actions` returned as part of
+ the response's `links` object can be used to check the status of the Droplet
+ create event.
+
+ ### Create Multiple Droplets
+
+ Creating multiple Droplets is very similar to creating a single Droplet. Instead
+ of sending `name` as a string, send `names` as an array of strings. A Droplet
+ will be created for each name you send using the associated information. Up to
+ ten Droplets may be created this way at a time.
+
+ Rather than returning a single Droplet, the response body will contain a JSON
+ array with a key called `droplets`. This will be set to an array of JSON
+ objects, each of which will contain the standard Droplet attributes. The
+ response code, 202 Accepted, does not indicate the success or failure of any
+ operation, just that the request has been accepted for processing. The array of
+ `actions` returned as part of the response's `links` object can be used to check
+ the status of each individual Droplet create event.
+
+ Args:
+ image: The image ID of a public or private image or the slug identifier for a public
+ image. This image will be the base image for your Droplet. Requires `image:read`
+ scope.
+
+ name: The human-readable string you wish to use when displaying the Droplet name. The
+ name, if set to a domain name managed in the DigitalOcean DNS management system,
+ will configure a PTR record for the Droplet. The name set during creation will
+ also determine the hostname for the Droplet in its internal configuration.
+
+ size: The slug identifier for the size that you wish to select for this Droplet.
+
+ backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+ is `true`, the backup plan will default to daily.
+
+ backups: A boolean indicating whether automated backups should be enabled for the
+ Droplet.
+
+ ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+ monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+ private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+ network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+ placed in your account's default VPC for the region.
+
+ region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`,
+ `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+ the Droplet may deploy in any region.
+
+ ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+ embed in the Droplet's root account upon creation. You must add the keys to your
+ team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+ tags: A flat array of tag names as strings to apply to the Droplet after it is
+ created. Tag names can either be existing or new tags. Requires `tag:create`
+ scope.
+
+ user_data: A string containing 'user data' which may be used to configure the Droplet on
+ first boot, often a 'cloud-config' file or Bash script. It must be plain text
+ and may not exceed 64 KiB in size.
+
+ volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+ once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+ vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+ If excluded, the Droplet will be assigned to your account's default VPC for the
+ region. Requires `vpc:read` scope.
+
+ with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+ providing access to the Droplet web console in the control panel. By default,
+ the agent is installed on new Droplets but installation errors (i.e. OS not
+ supported) are ignored. To prevent it from being installed, set to `false`. To
+ make installation errors fatal, explicitly set it to `true`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ image: Union[str, int],
+ names: SequenceNotStr[str],
+ size: str,
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ backups: bool | Omit = omit,
+ ipv6: bool | Omit = omit,
+ monitoring: bool | Omit = omit,
+ private_networking: bool | Omit = omit,
+ region: str | Omit = omit,
+ ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ user_data: str | Omit = omit,
+ volumes: SequenceNotStr[str] | Omit = omit,
+ vpc_uuid: str | Omit = omit,
+ with_droplet_agent: bool | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletCreateResponse:
+ """
+ To create a new Droplet, send a POST request to `/v2/droplets` setting the
+ required attributes.
+
+ A Droplet will be created using the provided information. The response body will
+ contain a JSON object with a key called `droplet`. The value will be an object
+ containing the standard attributes for your new Droplet. The response code, 202
+ Accepted, does not indicate the success or failure of the operation, just that
+ the request has been accepted for processing. The `actions` returned as part of
+ the response's `links` object can be used to check the status of the Droplet
+ create event.
+
+ ### Create Multiple Droplets
+
+ Creating multiple Droplets is very similar to creating a single Droplet. Instead
+ of sending `name` as a string, send `names` as an array of strings. A Droplet
+ will be created for each name you send using the associated information. Up to
+ ten Droplets may be created this way at a time.
+
+ Rather than returning a single Droplet, the response body will contain a JSON
+ array with a key called `droplets`. This will be set to an array of JSON
+ objects, each of which will contain the standard Droplet attributes. The
+ response code, 202 Accepted, does not indicate the success or failure of any
+ operation, just that the request has been accepted for processing. The array of
+ `actions` returned as part of the response's `links` object can be used to check
+ the status of each individual Droplet create event.
+
+ Args:
+ image: The image ID of a public or private image or the slug identifier for a public
+ image. This image will be the base image for your Droplet. Requires `image:read`
+ scope.
+
+          names: An array of human-readable strings you wish to use when displaying the
+ Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS
+ management system, will configure a PTR record for the Droplet. Each name set
+ during creation will also determine the hostname for the Droplet in its internal
+ configuration.
+
+ size: The slug identifier for the size that you wish to select for this Droplet.
+
+ backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+ is `true`, the backup plan will default to daily.
+
+ backups: A boolean indicating whether automated backups should be enabled for the
+ Droplet.
+
+ ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+ monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+ private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+ network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+ placed in your account's default VPC for the region.
+
+ region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`,
+ `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+ the Droplet may deploy in any region.
+
+ ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+ embed in the Droplet's root account upon creation. You must add the keys to your
+ team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+ tags: A flat array of tag names as strings to apply to the Droplet after it is
+ created. Tag names can either be existing or new tags. Requires `tag:create`
+ scope.
+
+ user_data: A string containing 'user data' which may be used to configure the Droplet on
+ first boot, often a 'cloud-config' file or Bash script. It must be plain text
+ and may not exceed 64 KiB in size.
+
+ volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+ once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+ vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+ If excluded, the Droplet will be assigned to your account's default VPC for the
+ region. Requires `vpc:read` scope.
+
+ with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+ providing access to the Droplet web console in the control panel. By default,
+ the agent is installed on new Droplets but installation errors (i.e. OS not
+ supported) are ignored. To prevent it from being installed, set to `false`. To
+ make installation errors fatal, explicitly set it to `true`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["image", "name", "size"], ["image", "names", "size"])
+ def create(
+ self,
+ *,
+ image: Union[str, int],
+ name: str | Omit = omit,
+ size: str,
+ backup_policy: DropletBackupPolicyParam | Omit = omit,
+ backups: bool | Omit = omit,
+ ipv6: bool | Omit = omit,
+ monitoring: bool | Omit = omit,
+ private_networking: bool | Omit = omit,
+ region: str | Omit = omit,
+ ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ user_data: str | Omit = omit,
+ volumes: SequenceNotStr[str] | Omit = omit,
+ vpc_uuid: str | Omit = omit,
+ with_droplet_agent: bool | Omit = omit,
+ names: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletCreateResponse:
+ return cast(
+ GPUDropletCreateResponse,
+ self._post(
+ "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+ body=maybe_transform(
+ {
+ "image": image,
+ "name": name,
+ "size": size,
+ "backup_policy": backup_policy,
+ "backups": backups,
+ "ipv6": ipv6,
+ "monitoring": monitoring,
+ "private_networking": private_networking,
+ "region": region,
+ "ssh_keys": ssh_keys,
+ "tags": tags,
+ "user_data": user_data,
+ "volumes": volumes,
+ "vpc_uuid": vpc_uuid,
+ "with_droplet_agent": with_droplet_agent,
+ "names": names,
+ },
+ gpu_droplet_create_params.GPUDropletCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=cast(
+ Any, GPUDropletCreateResponse
+ ), # Union types cannot be passed in as arguments in the type system
+ ),
+ )
+
+ def retrieve(
+ self,
+ droplet_id: int,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletRetrieveResponse:
+ """
+ To show information about an individual Droplet, send a GET request to
+ `/v2/droplets/$DROPLET_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/droplets/{droplet_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GPUDropletRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ name: str | Omit = omit,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ tag_name: str | Omit = omit,
+ type: Literal["droplets", "gpus"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletListResponse:
+ """
+ To list all Droplets in your account, send a GET request to `/v2/droplets`.
+
+ The response body will be a JSON object with a key of `droplets`. This will be
+ set to an array containing objects each representing a Droplet. These will
+ contain the standard Droplet attributes.
+
+ ### Filtering Results by Tag
+
+ It's possible to request filtered results by including certain query parameters.
+ To only list Droplets assigned to a specific tag, include the `tag_name` query
+ parameter set to the name of the tag in your GET request. For example,
+ `/v2/droplets?tag_name=$TAG_NAME`.
+
+ ### GPU Droplets
+
+ By default, only non-GPU Droplets are returned. To list only GPU Droplets, set
+ the `type` query parameter to `gpus`. For example, `/v2/droplets?type=gpus`.
+
+ Args:
+ name: Used to filter list response by Droplet name returning only exact matches. It is
+ case-insensitive and can not be combined with `tag_name`.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or
+ `type`. Requires `tag:read` scope.
+
+ type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default,
+ only non-GPU Droplets are returned. Can not be combined with `tag_name`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "name": name,
+ "page": page,
+ "per_page": per_page,
+ "tag_name": tag_name,
+ "type": type,
+ },
+ gpu_droplet_list_params.GPUDropletListParams,
+ ),
+ ),
+ cast_to=GPUDropletListResponse,
+ )
+
+ def delete(
+ self,
+ droplet_id: int,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`.
+
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/droplets/{droplet_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def delete_by_tag(
+ self,
+ *,
+ tag_name: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete **all** Droplets assigned to a specific tag, include the `tag_name`
+ query parameter set to the name of the tag in your DELETE request. For example,
+ `/v2/droplets?tag_name=$TAG_NAME`.
+
+ This endpoint requires `tag:read` scope.
+
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
+
+ Args:
+ tag_name: Specifies Droplets to be deleted by tag.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {"tag_name": tag_name}, gpu_droplet_delete_by_tag_params.GPUDropletDeleteByTagParams
+ ),
+ ),
+ cast_to=NoneType,
+ )
+
+ def list_firewalls(
+ self,
+ droplet_id: int,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletListFirewallsResponse:
+ """
+ To retrieve a list of all firewalls available to a Droplet, send a GET request
+ to `/v2/droplets/$DROPLET_ID/firewalls`
+
+ The response will be a JSON object that has a key called `firewalls`. This will
+ be set to an array of `firewall` objects, each of which contain the standard
+ `firewall` attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/droplets/{droplet_id}/firewalls"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ gpu_droplet_list_firewalls_params.GPUDropletListFirewallsParams,
+ ),
+ ),
+ cast_to=GPUDropletListFirewallsResponse,
+ )
+
+ def list_kernels(
+ self,
+ droplet_id: int,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletListKernelsResponse:
+ """
+ To retrieve a list of all kernels available to a Droplet, send a GET request to
+ `/v2/droplets/$DROPLET_ID/kernels`
+
+ The response will be a JSON object that has a key called `kernels`. This will be
+ set to an array of `kernel` objects, each of which contain the standard `kernel`
+ attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/droplets/{droplet_id}/kernels"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ gpu_droplet_list_kernels_params.GPUDropletListKernelsParams,
+ ),
+ ),
+ cast_to=GPUDropletListKernelsResponse,
+ )
+
+ def list_neighbors(
+ self,
+ droplet_id: int,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletListNeighborsResponse:
+ """To retrieve a list of any "neighbors" (i.e.
+
+ Droplets that are co-located on the
+ same physical hardware) for a specific Droplet, send a GET request to
+ `/v2/droplets/$DROPLET_ID/neighbors`.
+
+ The results will be returned as a JSON object with a key of `droplets`. This
+ will be set to an array containing objects representing any other Droplets that
+ share the same physical hardware. An empty array indicates that the Droplet is
+          not co-located with any other Droplets associated with your account.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/droplets/{droplet_id}/neighbors"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GPUDropletListNeighborsResponse,
+ )
+
+ def list_snapshots(
+ self,
+ droplet_id: int,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GPUDropletListSnapshotsResponse:
+ """
+ To retrieve the snapshots that have been created from a Droplet, send a GET
+ request to `/v2/droplets/$DROPLET_ID/snapshots`.
+
+ You will get back a JSON object that has a `snapshots` key. This will be set to
+ an array of snapshot objects, each of which contain the standard Droplet
+ snapshot attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/droplets/{droplet_id}/snapshots"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ gpu_droplet_list_snapshots_params.GPUDropletListSnapshotsParams,
+ ),
+ ),
+ cast_to=GPUDropletListSnapshotsResponse,
+ )
+
+
+class AsyncGPUDropletsResource(AsyncAPIResource):
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+
+ @cached_property
+ def backups(self) -> AsyncBackupsResource:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ return AsyncBackupsResource(self._client)
+
+ @cached_property
+ def actions(self) -> AsyncActionsResource:
+ """Droplet actions are tasks that can be executed on a Droplet.
+
+ These can be
+ things like rebooting, resizing, snapshotting, etc.
+
+ Droplet action requests are generally targeted at one of the "actions"
+ endpoints for a specific Droplet. The specific actions are usually
+ initiated by sending a POST request with the action and arguments as
+ parameters.
+
+ Droplet action requests create a Droplet actions object, which can be used
+ to get information about the status of an action. Creating a Droplet
+ action is asynchronous: the HTTP call will return the action object before
+ the action has finished processing on the Droplet. The current status of
+ an action can be retrieved from either the Droplet actions endpoint or the
+ global actions endpoint. If a Droplet action is uncompleted it may block
+ the creation of a subsequent action for that Droplet, the locked attribute
+ of the Droplet will be true and attempts to create a Droplet action will
+ fail with a status of 422.
+ """
+ return AsyncActionsResource(self._client)
+
+ @cached_property
+ def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResource:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ return AsyncDestroyWithAssociatedResourcesResource(self._client)
+
+    @cached_property
+    def autoscale(self) -> AsyncAutoscaleResource:
+        """
+        Droplet autoscale pools manage automatic horizontal scaling for your
+        applications based on resource usage (CPU, memory, or both) or a static
+        configuration.
+        """
+        return AsyncAutoscaleResource(self._client)
+
+    @cached_property
+    def firewalls(self) -> AsyncFirewallsResource:
+        """
+        [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+        provide the ability to restrict network access to and from a Droplet,
+        allowing you to define which ports will accept inbound or outbound
+        connections. By sending requests to the `/v2/firewalls` endpoint, you can
+        list, create, or delete firewalls as well as modify access rules.
+        """
+        return AsyncFirewallsResource(self._client)
+
+    @cached_property
+    def floating_ips(self) -> AsyncFloatingIPsResource:
+        """
+        As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+        The Reserved IP product's endpoints function in exactly the same way as Floating IPs.
+        The only difference is the name change throughout the URLs and fields.
+        For example, the `floating_ips` field is now the `reserved_ips` field.
+        The Floating IP endpoints will remain active until fall 2023 before being
+        permanently deprecated.
+
+        With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+        we will reflect this change as an additional field in the responses across the API
+        where the `floating_ip` field is used. For example, the Droplet metadata response
+        will contain the field `reserved_ips` in addition to the `floating_ips` field.
+        Floating IPs retrieved using the Projects API will retain the original name.
+
+        [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+        are publicly-accessible static IP addresses that can be mapped to one of
+        your Droplets. They can be used to create highly available setups or other
+        configurations requiring movable addresses.
+
+        Floating IPs are bound to a specific region.
+        """
+        return AsyncFloatingIPsResource(self._client)
+
+    @cached_property
+    def images(self) -> AsyncImagesResource:
+        """
+        A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+        used to create a Droplet and may come in a number of flavors. Currently,
+        there are five types of images: snapshots, backups, applications,
+        distributions, and custom images.
+
+        * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+          a full copy of an existing Droplet instance taken on demand.
+
+        * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+          to snapshots but are created automatically at regular intervals when
+          enabled for a Droplet.
+
+        * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+          are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+          formats are supported) that you may upload for use on DigitalOcean.
+
+        * Distributions are the public Linux distributions that are available to
+          be used as a base to create Droplets.
+
+        * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+          are distributions pre-configured with additional software.
+
+        To interact with images, you will generally send requests to the images
+        endpoint at `/v2/images`.
+        """
+        return AsyncImagesResource(self._client)
+
+    @cached_property
+    def load_balancers(self) -> AsyncLoadBalancersResource:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers, as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return AsyncLoadBalancersResource(self._client)
+
+    @cached_property
+    def sizes(self) -> AsyncSizesResource:
+        """
+        The sizes objects represent different packages of hardware resources that
+        can be used for Droplets. When a Droplet is created, a size must be
+        selected so that the correct resources can be allocated.
+
+        Each size represents a plan that bundles together specific sets of
+        resources. This includes the amount of RAM, the number of virtual CPUs,
+        disk space, and transfer. The size object also includes the pricing
+        details and the regions in which the size is available.
+        """
+        return AsyncSizesResource(self._client)
+
+    @cached_property
+    def snapshots(self) -> AsyncSnapshotsResource:
+        """
+        [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
+        instances of a Droplet or a block storage volume, which is reflected in
+        the `resource_type` attribute. In order to avoid problems with compressing
+        filesystems, each snapshot defines a `min_disk_size` attribute which is the minimum
+        size of the Droplet or volume disk when creating a new resource from the
+        saved snapshot.
+
+        To interact with snapshots, you will generally send requests to the
+        snapshots endpoint at `/v2/snapshots`.
+        """
+        return AsyncSnapshotsResource(self._client)
+
+    @cached_property
+    def volumes(self) -> AsyncVolumesResource:
+        """
+        [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+        provide expanded storage capacity for your Droplets and can be moved
+        between Droplets within a specific region.
+
+        Volumes function as raw block devices, meaning they appear to the
+        operating system as locally attached storage which can be formatted using
+        any file system supported by the OS. They may be created in sizes from
+        1GiB to 16TiB.
+
+        By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+        delete volumes as well as attach and detach them from Droplets.
+        """
+        return AsyncVolumesResource(self._client)
+
+    @cached_property
+    def account(self) -> AsyncAccountResource:
+        """Access the account sub-resource, bound to this resource's client."""
+        return AsyncAccountResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncGPUDropletsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncGPUDropletsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncGPUDropletsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncGPUDropletsResourceWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        name: str,
+        size: str,
+        backup_policy: DropletBackupPolicyParam | Omit = omit,
+        backups: bool | Omit = omit,
+        ipv6: bool | Omit = omit,
+        monitoring: bool | Omit = omit,
+        private_networking: bool | Omit = omit,
+        region: str | Omit = omit,
+        ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit,
+        tags: Optional[SequenceNotStr[str]] | Omit = omit,
+        user_data: str | Omit = omit,
+        volumes: SequenceNotStr[str] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        with_droplet_agent: bool | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletCreateResponse:
+        """
+        To create a new Droplet, send a POST request to `/v2/droplets` setting the
+        required attributes.
+
+        A Droplet will be created using the provided information. The response body will
+        contain a JSON object with a key called `droplet`. The value will be an object
+        containing the standard attributes for your new Droplet. The response code, 202
+        Accepted, does not indicate the success or failure of the operation, just that
+        the request has been accepted for processing. The `actions` returned as part of
+        the response's `links` object can be used to check the status of the Droplet
+        create event.
+
+        ### Create Multiple Droplets
+
+        Creating multiple Droplets is very similar to creating a single Droplet. Instead
+        of sending `name` as a string, send `names` as an array of strings. A Droplet
+        will be created for each name you send using the associated information. Up to
+        ten Droplets may be created this way at a time.
+
+        Rather than returning a single Droplet, the response body will contain a JSON
+        array with a key called `droplets`. This will be set to an array of JSON
+        objects, each of which will contain the standard Droplet attributes. The
+        response code, 202 Accepted, does not indicate the success or failure of any
+        operation, just that the request has been accepted for processing. The array of
+        `actions` returned as part of the response's `links` object can be used to check
+        the status of each individual Droplet create event.
+
+        Args:
+          image: The image ID of a public or private image or the slug identifier for a public
+              image. This image will be the base image for your Droplet. Requires `image:read`
+              scope.
+
+          name: The human-readable string you wish to use when displaying the Droplet name. The
+              name, if set to a domain name managed in the DigitalOcean DNS management system,
+              will configure a PTR record for the Droplet. The name set during creation will
+              also determine the hostname for the Droplet in its internal configuration.
+
+          size: The slug identifier for the size that you wish to select for this Droplet.
+
+          backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+              is `true`, the backup plan will default to daily.
+
+          backups: A boolean indicating whether automated backups should be enabled for the
+              Droplet.
+
+          ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+          monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+          private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+              network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+              placed in your account's default VPC for the region.
+
+          region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`,
+              `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+              the Droplet may deploy in any region.
+
+          ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+              embed in the Droplet's root account upon creation. You must add the keys to your
+              team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+          tags: A flat array of tag names as strings to apply to the Droplet after it is
+              created. Tag names can either be existing or new tags. Requires `tag:create`
+              scope.
+
+          user_data: A string containing 'user data' which may be used to configure the Droplet on
+              first boot, often a 'cloud-config' file or Bash script. It must be plain text
+              and may not exceed 64 KiB in size.
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        names: SequenceNotStr[str],
+        size: str,
+        backup_policy: DropletBackupPolicyParam | Omit = omit,
+        backups: bool | Omit = omit,
+        ipv6: bool | Omit = omit,
+        monitoring: bool | Omit = omit,
+        private_networking: bool | Omit = omit,
+        region: str | Omit = omit,
+        ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit,
+        tags: Optional[SequenceNotStr[str]] | Omit = omit,
+        user_data: str | Omit = omit,
+        volumes: SequenceNotStr[str] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        with_droplet_agent: bool | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletCreateResponse:
+        """
+        To create a new Droplet, send a POST request to `/v2/droplets` setting the
+        required attributes.
+
+        A Droplet will be created using the provided information. The response body will
+        contain a JSON object with a key called `droplet`. The value will be an object
+        containing the standard attributes for your new Droplet. The response code, 202
+        Accepted, does not indicate the success or failure of the operation, just that
+        the request has been accepted for processing. The `actions` returned as part of
+        the response's `links` object can be used to check the status of the Droplet
+        create event.
+
+        ### Create Multiple Droplets
+
+        Creating multiple Droplets is very similar to creating a single Droplet. Instead
+        of sending `name` as a string, send `names` as an array of strings. A Droplet
+        will be created for each name you send using the associated information. Up to
+        ten Droplets may be created this way at a time.
+
+        Rather than returning a single Droplet, the response body will contain a JSON
+        array with a key called `droplets`. This will be set to an array of JSON
+        objects, each of which will contain the standard Droplet attributes. The
+        response code, 202 Accepted, does not indicate the success or failure of any
+        operation, just that the request has been accepted for processing. The array of
+        `actions` returned as part of the response's `links` object can be used to check
+        the status of each individual Droplet create event.
+
+        Args:
+          image: The image ID of a public or private image or the slug identifier for a public
+              image. This image will be the base image for your Droplet. Requires `image:read`
+              scope.
+
+          names: An array of human-readable strings you wish to use when displaying the
+              Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS
+              management system, will configure a PTR record for the Droplet. Each name set
+              during creation will also determine the hostname for the Droplet in its internal
+              configuration.
+
+          size: The slug identifier for the size that you wish to select for this Droplet.
+
+          backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+              is `true`, the backup plan will default to daily.
+
+          backups: A boolean indicating whether automated backups should be enabled for the
+              Droplet.
+
+          ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+          monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+          private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+              network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+              placed in your account's default VPC for the region.
+
+          region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`,
+              `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+              the Droplet may deploy in any region.
+
+          ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+              embed in the Droplet's root account upon creation. You must add the keys to your
+              team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+          tags: A flat array of tag names as strings to apply to the Droplet after it is
+              created. Tag names can either be existing or new tags. Requires `tag:create`
+              scope.
+
+          user_data: A string containing 'user data' which may be used to configure the Droplet on
+              first boot, often a 'cloud-config' file or Bash script. It must be plain text
+              and may not exceed 64 KiB in size.
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["image", "name", "size"], ["image", "names", "size"])
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        name: str | Omit = omit,
+        size: str,
+        backup_policy: DropletBackupPolicyParam | Omit = omit,
+        backups: bool | Omit = omit,
+        ipv6: bool | Omit = omit,
+        monitoring: bool | Omit = omit,
+        private_networking: bool | Omit = omit,
+        region: str | Omit = omit,
+        ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit,
+        tags: Optional[SequenceNotStr[str]] | Omit = omit,
+        user_data: str | Omit = omit,
+        volumes: SequenceNotStr[str] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        with_droplet_agent: bool | Omit = omit,
+        names: SequenceNotStr[str] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletCreateResponse:
+        # Shared implementation behind both overloads; `required_args` enforces
+        # that the caller supplied either `name` (single create) or `names`
+        # (batch create) alongside `image` and `size`.
+        return cast(
+            GPUDropletCreateResponse,
+            await self._post(
+                "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+                body=await async_maybe_transform(
+                    {
+                        "image": image,
+                        "name": name,
+                        "size": size,
+                        "backup_policy": backup_policy,
+                        "backups": backups,
+                        "ipv6": ipv6,
+                        "monitoring": monitoring,
+                        "private_networking": private_networking,
+                        "region": region,
+                        "ssh_keys": ssh_keys,
+                        "tags": tags,
+                        "user_data": user_data,
+                        "volumes": volumes,
+                        "vpc_uuid": vpc_uuid,
+                        "with_droplet_agent": with_droplet_agent,
+                        "names": names,
+                    },
+                    gpu_droplet_create_params.GPUDropletCreateParams,
+                ),
+                options=make_request_options(
+                    extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                ),
+                cast_to=cast(
+                    Any, GPUDropletCreateResponse
+                ),  # Union types cannot be passed in as arguments in the type system
+            ),
+        )
+
+    async def retrieve(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletRetrieveResponse:
+        """
+        To show information about an individual Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID`.
+
+        Args:
+          droplet_id: The unique identifier of the Droplet.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=GPUDropletRetrieveResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        name: str | Omit = omit,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        tag_name: str | Omit = omit,
+        type: Literal["droplets", "gpus"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletListResponse:
+        """
+        To list all Droplets in your account, send a GET request to `/v2/droplets`.
+
+        The response body will be a JSON object with a key of `droplets`. This will be
+        set to an array containing objects each representing a Droplet. These will
+        contain the standard Droplet attributes.
+
+        ### Filtering Results by Tag
+
+        It's possible to request filtered results by including certain query parameters.
+        To only list Droplets assigned to a specific tag, include the `tag_name` query
+        parameter set to the name of the tag in your GET request. For example,
+        `/v2/droplets?tag_name=$TAG_NAME`.
+
+        ### GPU Droplets
+
+        By default, only non-GPU Droplets are returned. To list only GPU Droplets, set
+        the `type` query parameter to `gpus`. For example, `/v2/droplets?type=gpus`.
+
+        Args:
+          name: Used to filter list response by Droplet name returning only exact matches. It is
+              case-insensitive and can not be combined with `tag_name`.
+
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page.
+
+          tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or
+              `type`. Requires `tag:read` scope.
+
+          type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default,
+              only non-GPU Droplets are returned. Can not be combined with `tag_name`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "name": name,
+                        "page": page,
+                        "per_page": per_page,
+                        "tag_name": tag_name,
+                        "type": type,
+                    },
+                    gpu_droplet_list_params.GPUDropletListParams,
+                ),
+            ),
+            cast_to=GPUDropletListResponse,
+        )
+
+    async def delete(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`.
+
+        A successful request will receive a 204 status code with no body in response.
+        This indicates that the request was processed successfully.
+
+        Args:
+          droplet_id: The unique identifier of the Droplet.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # The endpoint replies 204 with no body, so accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/v2/droplets/{droplet_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def delete_by_tag(
+        self,
+        *,
+        tag_name: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To delete **all** Droplets assigned to a specific tag, include the `tag_name`
+        query parameter set to the name of the tag in your DELETE request. For example,
+        `/v2/droplets?tag_name=$TAG_NAME`.
+
+        This endpoint requires `tag:read` scope.
+
+        A successful request will receive a 204 status code with no body in response.
+        This indicates that the request was processed successfully.
+
+        Args:
+          tag_name: Specifies Droplets to be deleted by tag.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # The endpoint replies 204 with no body, so accept any content type.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {"tag_name": tag_name}, gpu_droplet_delete_by_tag_params.GPUDropletDeleteByTagParams
+                ),
+            ),
+            cast_to=NoneType,
+        )
+
+    async def list_firewalls(
+        self,
+        droplet_id: int,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletListFirewallsResponse:
+        """
+        To retrieve a list of all firewalls available to a Droplet, send a GET request
+        to `/v2/droplets/$DROPLET_ID/firewalls`
+
+        The response will be a JSON object that has a key called `firewalls`. This will
+        be set to an array of `firewall` objects, each of which contain the standard
+        `firewall` attributes.
+
+        Args:
+          droplet_id: The unique identifier of the Droplet.
+
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/firewalls"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_firewalls_params.GPUDropletListFirewallsParams,
+                ),
+            ),
+            cast_to=GPUDropletListFirewallsResponse,
+        )
+
+    async def list_kernels(
+        self,
+        droplet_id: int,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletListKernelsResponse:
+        """
+        To retrieve a list of all kernels available to a Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/kernels`
+
+        The response will be a JSON object that has a key called `kernels`. This will be
+        set to an array of `kernel` objects, each of which contain the standard `kernel`
+        attributes.
+
+        Args:
+          droplet_id: The unique identifier of the Droplet.
+
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/kernels"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_kernels_params.GPUDropletListKernelsParams,
+                ),
+            ),
+            cast_to=GPUDropletListKernelsResponse,
+        )
+
+    async def list_neighbors(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletListNeighborsResponse:
+        """To retrieve a list of any "neighbors" (i.e.
+
+        Droplets that are co-located on the
+        same physical hardware) for a specific Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/neighbors`.
+
+        The results will be returned as a JSON object with a key of `droplets`. This
+        will be set to an array containing objects representing any other Droplets that
+        share the same physical hardware. An empty array indicates that the Droplet is
+        not co-located with any other Droplets associated with your account.
+
+        Args:
+          droplet_id: The unique identifier of the Droplet.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/neighbors"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=GPUDropletListNeighborsResponse,
+        )
+
+    async def list_snapshots(
+        self,
+        droplet_id: int,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> GPUDropletListSnapshotsResponse:
+        """
+        To retrieve the snapshots that have been created from a Droplet, send a GET
+        request to `/v2/droplets/$DROPLET_ID/snapshots`.
+
+        You will get back a JSON object that has a `snapshots` key. This will be set to
+        an array of snapshot objects, each of which contains the standard Droplet
+        snapshot attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/snapshots"  # relative path when the client's base URL has been overridden
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots",  # absolute default API host otherwise
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(  # presumably drops omitted values and serializes per the params TypedDict — generated helper
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_snapshots_params.GPUDropletListSnapshotsParams,
+                ),
+            ),
+            cast_to=GPUDropletListSnapshotsResponse,
+        )
+
+
+class GPUDropletsResourceWithRawResponse:  # wraps every GPUDropletsResource method with to_raw_response_wrapper (raw-response access variant)
+    def __init__(self, gpu_droplets: GPUDropletsResource) -> None:
+        self._gpu_droplets = gpu_droplets  # underlying resource; the sub-resource properties below delegate to it
+
+        self.create = to_raw_response_wrapper(  # one wrapper per public method, mirroring the resource's surface
+            gpu_droplets.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            gpu_droplets.retrieve,
+        )
+        self.list = to_raw_response_wrapper(
+            gpu_droplets.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            gpu_droplets.delete,
+        )
+        self.delete_by_tag = to_raw_response_wrapper(
+            gpu_droplets.delete_by_tag,
+        )
+        self.list_firewalls = to_raw_response_wrapper(
+            gpu_droplets.list_firewalls,
+        )
+        self.list_kernels = to_raw_response_wrapper(
+            gpu_droplets.list_kernels,
+        )
+        self.list_neighbors = to_raw_response_wrapper(
+            gpu_droplets.list_neighbors,
+        )
+        self.list_snapshots = to_raw_response_wrapper(
+            gpu_droplets.list_snapshots,
+        )
+
+    @cached_property
+    def backups(self) -> BackupsResourceWithRawResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        return BackupsResourceWithRawResponse(self._gpu_droplets.backups)
+
+    @cached_property
+    def actions(self) -> ActionsResourceWithRawResponse:
+        """Droplet actions are tasks that can be executed on a Droplet.
+
+        These can be
+        things like rebooting, resizing, snapshotting, etc.
+
+        Droplet action requests are generally targeted at one of the "actions"
+        endpoints for a specific Droplet. The specific actions are usually
+        initiated by sending a POST request with the action and arguments as
+        parameters.
+
+        Droplet action requests create a Droplet actions object, which can be used
+        to get information about the status of an action. Creating a Droplet
+        action is asynchronous: the HTTP call will return the action object before
+        the action has finished processing on the Droplet. The current status of
+        an action can be retrieved from either the Droplet actions endpoint or the
+        global actions endpoint. If a Droplet action is uncompleted it may block
+        the creation of a subsequent action for that Droplet, the locked attribute
+        of the Droplet will be true and attempts to create a Droplet action will
+        fail with a status of 422.
+        """
+        return ActionsResourceWithRawResponse(self._gpu_droplets.actions)
+
+    @cached_property
+    def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResourceWithRawResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        return DestroyWithAssociatedResourcesResourceWithRawResponse(
+            self._gpu_droplets.destroy_with_associated_resources
+        )
+
+    @cached_property
+    def autoscale(self) -> AutoscaleResourceWithRawResponse:
+        """
+        Droplet autoscale pools manage automatic horizontal scaling for your applications based on resource usage (CPU, memory, or both) or a static configuration.
+        """
+        return AutoscaleResourceWithRawResponse(self._gpu_droplets.autoscale)
+
+    @cached_property
+    def firewalls(self) -> FirewallsResourceWithRawResponse:
+        """
+        [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+        provide the ability to restrict network access to and from a Droplet
+        allowing you to define which ports will accept inbound or outbound
+        connections. By sending requests to the `/v2/firewalls` endpoint, you can
+        list, create, or delete firewalls as well as modify access rules.
+        """
+        return FirewallsResourceWithRawResponse(self._gpu_droplets.firewalls)
+
+    @cached_property
+    def floating_ips(self) -> FloatingIPsResourceWithRawResponse:
+        """
+        As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+        The Reserved IP product's endpoints function the exact same way as Floating IPs.
+        The only difference is the name change throughout the URLs and fields.
+        For example, the `floating_ips` field is now the `reserved_ips` field.
+        The Floating IP endpoints will remain active until fall 2023 before being
+        permanently deprecated.
+
+        With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+        we will reflect this change as an additional field in the responses across the API
+        where the `floating_ip` field is used. For example, the Droplet metadata response
+        will contain the field `reserved_ips` in addition to the `floating_ips` field.
+        Floating IPs retrieved using the Projects API will retain the original name.
+
+        [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+        are publicly-accessible static IP addresses that can be mapped to one of
+        your Droplets. They can be used to create highly available setups or other
+        configurations requiring movable addresses.
+
+        Floating IPs are bound to a specific region.
+        """
+        return FloatingIPsResourceWithRawResponse(self._gpu_droplets.floating_ips)
+
+    @cached_property
+    def images(self) -> ImagesResourceWithRawResponse:
+        """
+        A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+        used to create a Droplet and may come in a number of flavors. Currently,
+        there are five types of images: snapshots, backups, applications,
+        distributions, and custom images.
+
+        * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+          a full copy of an existing Droplet instance taken on demand.
+
+        * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+          to snapshots but are created automatically at regular intervals when
+          enabled for a Droplet.
+
+        * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+          are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+          formats are supported) that you may upload for use on DigitalOcean.
+
+        * Distributions are the public Linux distributions that are available to
+          be used as a base to create Droplets.
+
+        * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+          are distributions pre-configured with additional software.
+
+        To interact with images, you will generally send requests to the images
+        endpoint at /v2/images.
+        """
+        return ImagesResourceWithRawResponse(self._gpu_droplets.images)
+
+    @cached_property
+    def load_balancers(self) -> LoadBalancersResourceWithRawResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return LoadBalancersResourceWithRawResponse(self._gpu_droplets.load_balancers)
+
+    @cached_property
+    def sizes(self) -> SizesResourceWithRawResponse:
+        """
+        The sizes objects represent different packages of hardware resources that
+        can be used for Droplets. When a Droplet is created, a size must be
+        selected so that the correct resources can be allocated.
+
+        Each size represents a plan that bundles together specific sets of
+        resources. This includes the amount of RAM, the number of virtual CPUs,
+        disk space, and transfer. The size object also includes the pricing
+        details and the regions that the size is available in.
+        """
+        return SizesResourceWithRawResponse(self._gpu_droplets.sizes)
+
+    @cached_property
+    def snapshots(self) -> SnapshotsResourceWithRawResponse:
+        """
+        [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
+        instances of a Droplet or a block storage volume, which is reflected in
+        the `resource_type` attribute. In order to avoid problems with compressing
+        filesystems, each defines a `min_disk_size` attribute which is the minimum
+        size of the Droplet or volume disk when creating a new resource from the
+        saved snapshot.
+
+        To interact with snapshots, you will generally send requests to the
+        snapshots endpoint at `/v2/snapshots`.
+        """
+        return SnapshotsResourceWithRawResponse(self._gpu_droplets.snapshots)
+
+    @cached_property
+    def volumes(self) -> VolumesResourceWithRawResponse:
+        """
+        [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+        provide expanded storage capacity for your Droplets and can be moved
+        between Droplets within a specific region.
+
+        Volumes function as raw block devices, meaning they appear to the
+        operating system as locally attached storage which can be formatted using
+        any file system supported by the OS. They may be created in sizes from
+        1GiB to 16TiB.
+
+        By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+        delete volumes as well as attach and detach them from Droplets
+        """
+        return VolumesResourceWithRawResponse(self._gpu_droplets.volumes)
+
+    @cached_property
+    def account(self) -> AccountResourceWithRawResponse:
+        return AccountResourceWithRawResponse(self._gpu_droplets.account)  # raw-response view of the account sub-resource (no upstream description)
+
+
+class AsyncGPUDropletsResourceWithRawResponse:  # async counterpart: wraps AsyncGPUDropletsResource methods with async_to_raw_response_wrapper
+    def __init__(self, gpu_droplets: AsyncGPUDropletsResource) -> None:
+        self._gpu_droplets = gpu_droplets  # underlying async resource; the sub-resource properties below delegate to it
+
+        self.create = async_to_raw_response_wrapper(  # one wrapper per public method, mirroring the resource's surface
+            gpu_droplets.create,
+        )
+        self.retrieve = async_to_raw_response_wrapper(
+            gpu_droplets.retrieve,
+        )
+        self.list = async_to_raw_response_wrapper(
+            gpu_droplets.list,
+        )
+        self.delete = async_to_raw_response_wrapper(
+            gpu_droplets.delete,
+        )
+        self.delete_by_tag = async_to_raw_response_wrapper(
+            gpu_droplets.delete_by_tag,
+        )
+        self.list_firewalls = async_to_raw_response_wrapper(
+            gpu_droplets.list_firewalls,
+        )
+        self.list_kernels = async_to_raw_response_wrapper(
+            gpu_droplets.list_kernels,
+        )
+        self.list_neighbors = async_to_raw_response_wrapper(
+            gpu_droplets.list_neighbors,
+        )
+        self.list_snapshots = async_to_raw_response_wrapper(
+            gpu_droplets.list_snapshots,
+        )
+
+    @cached_property
+    def backups(self) -> AsyncBackupsResourceWithRawResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        return AsyncBackupsResourceWithRawResponse(self._gpu_droplets.backups)
+
+    @cached_property
+    def actions(self) -> AsyncActionsResourceWithRawResponse:
+        """Droplet actions are tasks that can be executed on a Droplet.
+
+        These can be
+        things like rebooting, resizing, snapshotting, etc.
+
+        Droplet action requests are generally targeted at one of the "actions"
+        endpoints for a specific Droplet. The specific actions are usually
+        initiated by sending a POST request with the action and arguments as
+        parameters.
+
+        Droplet action requests create a Droplet actions object, which can be used
+        to get information about the status of an action. Creating a Droplet
+        action is asynchronous: the HTTP call will return the action object before
+        the action has finished processing on the Droplet. The current status of
+        an action can be retrieved from either the Droplet actions endpoint or the
+        global actions endpoint. If a Droplet action is uncompleted it may block
+        the creation of a subsequent action for that Droplet, the locked attribute
+        of the Droplet will be true and attempts to create a Droplet action will
+        fail with a status of 422.
+        """
+        return AsyncActionsResourceWithRawResponse(self._gpu_droplets.actions)
+
+    @cached_property
+    def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse(
+            self._gpu_droplets.destroy_with_associated_resources
+        )
+
+    @cached_property
+    def autoscale(self) -> AsyncAutoscaleResourceWithRawResponse:
+        """
+        Droplet autoscale pools manage automatic horizontal scaling for your applications based on resource usage (CPU, memory, or both) or a static configuration.
+        """
+        return AsyncAutoscaleResourceWithRawResponse(self._gpu_droplets.autoscale)
+
+    @cached_property
+    def firewalls(self) -> AsyncFirewallsResourceWithRawResponse:
+        """
+        [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+        provide the ability to restrict network access to and from a Droplet
+        allowing you to define which ports will accept inbound or outbound
+        connections. By sending requests to the `/v2/firewalls` endpoint, you can
+        list, create, or delete firewalls as well as modify access rules.
+        """
+        return AsyncFirewallsResourceWithRawResponse(self._gpu_droplets.firewalls)
+
+    @cached_property
+    def floating_ips(self) -> AsyncFloatingIPsResourceWithRawResponse:
+        """
+        As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+        The Reserved IP product's endpoints function the exact same way as Floating IPs.
+        The only difference is the name change throughout the URLs and fields.
+        For example, the `floating_ips` field is now the `reserved_ips` field.
+        The Floating IP endpoints will remain active until fall 2023 before being
+        permanently deprecated.
+
+        With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+        we will reflect this change as an additional field in the responses across the API
+        where the `floating_ip` field is used. For example, the Droplet metadata response
+        will contain the field `reserved_ips` in addition to the `floating_ips` field.
+        Floating IPs retrieved using the Projects API will retain the original name.
+
+        [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+        are publicly-accessible static IP addresses that can be mapped to one of
+        your Droplets. They can be used to create highly available setups or other
+        configurations requiring movable addresses.
+
+        Floating IPs are bound to a specific region.
+        """
+        return AsyncFloatingIPsResourceWithRawResponse(self._gpu_droplets.floating_ips)
+
+    @cached_property
+    def images(self) -> AsyncImagesResourceWithRawResponse:
+        """
+        A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+        used to create a Droplet and may come in a number of flavors. Currently,
+        there are five types of images: snapshots, backups, applications,
+        distributions, and custom images.
+
+        * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+          a full copy of an existing Droplet instance taken on demand.
+
+        * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+          to snapshots but are created automatically at regular intervals when
+          enabled for a Droplet.
+
+        * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+          are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+          formats are supported) that you may upload for use on DigitalOcean.
+
+        * Distributions are the public Linux distributions that are available to
+          be used as a base to create Droplets.
+
+        * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+          are distributions pre-configured with additional software.
+
+        To interact with images, you will generally send requests to the images
+        endpoint at /v2/images.
+        """
+        return AsyncImagesResourceWithRawResponse(self._gpu_droplets.images)
+
+    @cached_property
+    def load_balancers(self) -> AsyncLoadBalancersResourceWithRawResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return AsyncLoadBalancersResourceWithRawResponse(self._gpu_droplets.load_balancers)
+
+    @cached_property
+    def sizes(self) -> AsyncSizesResourceWithRawResponse:
+        """
+        The sizes objects represent different packages of hardware resources that
+        can be used for Droplets. When a Droplet is created, a size must be
+        selected so that the correct resources can be allocated.
+
+        Each size represents a plan that bundles together specific sets of
+        resources. This includes the amount of RAM, the number of virtual CPUs,
+        disk space, and transfer. The size object also includes the pricing
+        details and the regions that the size is available in.
+        """
+        return AsyncSizesResourceWithRawResponse(self._gpu_droplets.sizes)
+
+    @cached_property
+    def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse:
+        """
+        [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
+        instances of a Droplet or a block storage volume, which is reflected in
+        the `resource_type` attribute. In order to avoid problems with compressing
+        filesystems, each defines a `min_disk_size` attribute which is the minimum
+        size of the Droplet or volume disk when creating a new resource from the
+        saved snapshot.
+
+        To interact with snapshots, you will generally send requests to the
+        snapshots endpoint at `/v2/snapshots`.
+        """
+        return AsyncSnapshotsResourceWithRawResponse(self._gpu_droplets.snapshots)
+
+    @cached_property
+    def volumes(self) -> AsyncVolumesResourceWithRawResponse:
+        """
+        [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+        provide expanded storage capacity for your Droplets and can be moved
+        between Droplets within a specific region.
+
+        Volumes function as raw block devices, meaning they appear to the
+        operating system as locally attached storage which can be formatted using
+        any file system supported by the OS. They may be created in sizes from
+        1GiB to 16TiB.
+
+        By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+        delete volumes as well as attach and detach them from Droplets
+        """
+        return AsyncVolumesResourceWithRawResponse(self._gpu_droplets.volumes)
+
+    @cached_property
+    def account(self) -> AsyncAccountResourceWithRawResponse:
+        return AsyncAccountResourceWithRawResponse(self._gpu_droplets.account)  # raw-response view of the account sub-resource (no upstream description)
+
+
+class GPUDropletsResourceWithStreamingResponse:  # wraps every GPUDropletsResource method with to_streamed_response_wrapper (streaming-response access variant)
+    def __init__(self, gpu_droplets: GPUDropletsResource) -> None:
+        self._gpu_droplets = gpu_droplets  # underlying resource; the sub-resource properties below delegate to it
+
+        self.create = to_streamed_response_wrapper(  # one wrapper per public method, mirroring the resource's surface
+            gpu_droplets.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            gpu_droplets.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            gpu_droplets.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            gpu_droplets.delete,
+        )
+        self.delete_by_tag = to_streamed_response_wrapper(
+            gpu_droplets.delete_by_tag,
+        )
+        self.list_firewalls = to_streamed_response_wrapper(
+            gpu_droplets.list_firewalls,
+        )
+        self.list_kernels = to_streamed_response_wrapper(
+            gpu_droplets.list_kernels,
+        )
+        self.list_neighbors = to_streamed_response_wrapper(
+            gpu_droplets.list_neighbors,
+        )
+        self.list_snapshots = to_streamed_response_wrapper(
+            gpu_droplets.list_snapshots,
+        )
+
+    @cached_property
+    def backups(self) -> BackupsResourceWithStreamingResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        return BackupsResourceWithStreamingResponse(self._gpu_droplets.backups)
+
+    @cached_property
+    def actions(self) -> ActionsResourceWithStreamingResponse:
+        """Droplet actions are tasks that can be executed on a Droplet.
+
+        These can be
+        things like rebooting, resizing, snapshotting, etc.
+
+        Droplet action requests are generally targeted at one of the "actions"
+        endpoints for a specific Droplet. The specific actions are usually
+        initiated by sending a POST request with the action and arguments as
+        parameters.
+
+        Droplet action requests create a Droplet actions object, which can be used
+        to get information about the status of an action. Creating a Droplet
+        action is asynchronous: the HTTP call will return the action object before
+        the action has finished processing on the Droplet. The current status of
+        an action can be retrieved from either the Droplet actions endpoint or the
+        global actions endpoint. If a Droplet action is uncompleted it may block
+        the creation of a subsequent action for that Droplet, the locked attribute
+        of the Droplet will be true and attempts to create a Droplet action will
+        fail with a status of 422.
+        """
+        return ActionsResourceWithStreamingResponse(self._gpu_droplets.actions)
+
+    @cached_property
+    def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse:
+        """
+        A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+        virtual machine. By sending requests to the Droplet endpoint, you can
+        list, create, or delete Droplets.
+
+        Some of the attributes will have an object value. The `region` and `image`
+        objects will all contain the standard attributes of their associated
+        types. Find more information about each of these objects in their
+        respective sections.
+        """
+        return DestroyWithAssociatedResourcesResourceWithStreamingResponse(
+            self._gpu_droplets.destroy_with_associated_resources
+        )
+
+    @cached_property
+    def autoscale(self) -> AutoscaleResourceWithStreamingResponse:
+        """
+        Droplet autoscale pools manage automatic horizontal scaling for your applications based on resource usage (CPU, memory, or both) or a static configuration.
+        """
+        return AutoscaleResourceWithStreamingResponse(self._gpu_droplets.autoscale)
+
+    @cached_property
+    def firewalls(self) -> FirewallsResourceWithStreamingResponse:
+        """
+        [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+        provide the ability to restrict network access to and from a Droplet
+        allowing you to define which ports will accept inbound or outbound
+        connections. By sending requests to the `/v2/firewalls` endpoint, you can
+        list, create, or delete firewalls as well as modify access rules.
+        """
+        return FirewallsResourceWithStreamingResponse(self._gpu_droplets.firewalls)
+
+    @cached_property
+    def floating_ips(self) -> FloatingIPsResourceWithStreamingResponse:
+        """
+        As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+        The Reserved IP product's endpoints function the exact same way as Floating IPs.
+        The only difference is the name change throughout the URLs and fields.
+        For example, the `floating_ips` field is now the `reserved_ips` field.
+        The Floating IP endpoints will remain active until fall 2023 before being
+        permanently deprecated.
+
+        With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+        we will reflect this change as an additional field in the responses across the API
+        where the `floating_ip` field is used. For example, the Droplet metadata response
+        will contain the field `reserved_ips` in addition to the `floating_ips` field.
+        Floating IPs retrieved using the Projects API will retain the original name.
+
+        [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+        are publicly-accessible static IP addresses that can be mapped to one of
+        your Droplets. They can be used to create highly available setups or other
+        configurations requiring movable addresses.
+
+        Floating IPs are bound to a specific region.
+        """
+        return FloatingIPsResourceWithStreamingResponse(self._gpu_droplets.floating_ips)
+
+    @cached_property
+    def images(self) -> ImagesResourceWithStreamingResponse:
+        """
+        A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+        used to create a Droplet and may come in a number of flavors. Currently,
+        there are five types of images: snapshots, backups, applications,
+        distributions, and custom images.
+
+        * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+          a full copy of an existing Droplet instance taken on demand.
+
+        * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+          to snapshots but are created automatically at regular intervals when
+          enabled for a Droplet.
+
+        * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+          are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+          formats are supported) that you may upload for use on DigitalOcean.
+
+        * Distributions are the public Linux distributions that are available to
+          be used as a base to create Droplets.
+
+        * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+          are distributions pre-configured with additional software.
+
+        To interact with images, you will generally send requests to the images
+        endpoint at /v2/images.
+        """
+        return ImagesResourceWithStreamingResponse(self._gpu_droplets.images)
+
+    @cached_property
+    def load_balancers(self) -> LoadBalancersResourceWithStreamingResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return LoadBalancersResourceWithStreamingResponse(self._gpu_droplets.load_balancers)
+
+    @cached_property
+    def sizes(self) -> SizesResourceWithStreamingResponse:
+        """
+        The sizes objects represent different packages of hardware resources that
+        can be used for Droplets. When a Droplet is created, a size must be
+        selected so that the correct resources can be allocated.
+
+        Each size represents a plan that bundles together specific sets of
+        resources. This includes the amount of RAM, the number of virtual CPUs,
+        disk space, and transfer. The size object also includes the pricing
+        details and the regions that the size is available in.
+        """
+        return SizesResourceWithStreamingResponse(self._gpu_droplets.sizes)
+
+    @cached_property
+    def snapshots(self) -> SnapshotsResourceWithStreamingResponse:
+        """
+        [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
+        instances of a Droplet or a block storage volume, which is reflected in
+        the `resource_type` attribute. In order to avoid problems with compressing
+        filesystems, each defines a `min_disk_size` attribute which is the minimum
+        size of the Droplet or volume disk when creating a new resource from the
+        saved snapshot.
+
+        To interact with snapshots, you will generally send requests to the
+        snapshots endpoint at `/v2/snapshots`.
+        """
+        return SnapshotsResourceWithStreamingResponse(self._gpu_droplets.snapshots)
+
+    @cached_property
+    def volumes(self) -> VolumesResourceWithStreamingResponse:
+        """
+        [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+        provide expanded storage capacity for your Droplets and can be moved
+        between Droplets within a specific region.
+
+        Volumes function as raw block devices, meaning they appear to the
+        operating system as locally attached storage which can be formatted using
+        any file system supported by the OS. They may be created in sizes from
+        1GiB to 16TiB.
+
+        By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+        delete volumes as well as attach and detach them from Droplets
+        """
+        return VolumesResourceWithStreamingResponse(self._gpu_droplets.volumes)
+
+    @cached_property
+    def account(self) -> AccountResourceWithStreamingResponse:
+        return AccountResourceWithStreamingResponse(self._gpu_droplets.account)  # streaming-response view of the account sub-resource (no upstream description)
+
+
+class AsyncGPUDropletsResourceWithStreamingResponse:
+ def __init__(self, gpu_droplets: AsyncGPUDropletsResource) -> None:
+ self._gpu_droplets = gpu_droplets
+
+ self.create = async_to_streamed_response_wrapper(
+ gpu_droplets.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ gpu_droplets.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ gpu_droplets.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ gpu_droplets.delete,
+ )
+ self.delete_by_tag = async_to_streamed_response_wrapper(
+ gpu_droplets.delete_by_tag,
+ )
+ self.list_firewalls = async_to_streamed_response_wrapper(
+ gpu_droplets.list_firewalls,
+ )
+ self.list_kernels = async_to_streamed_response_wrapper(
+ gpu_droplets.list_kernels,
+ )
+ self.list_neighbors = async_to_streamed_response_wrapper(
+ gpu_droplets.list_neighbors,
+ )
+ self.list_snapshots = async_to_streamed_response_wrapper(
+ gpu_droplets.list_snapshots,
+ )
+
+ @cached_property
+ def backups(self) -> AsyncBackupsResourceWithStreamingResponse:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ return AsyncBackupsResourceWithStreamingResponse(self._gpu_droplets.backups)
+
+ @cached_property
+ def actions(self) -> AsyncActionsResourceWithStreamingResponse:
+ """Droplet actions are tasks that can be executed on a Droplet.
+
+ These can be
+ things like rebooting, resizing, snapshotting, etc.
+
+ Droplet action requests are generally targeted at one of the "actions"
+ endpoints for a specific Droplet. The specific actions are usually
+ initiated by sending a POST request with the action and arguments as
+ parameters.
+
+ Droplet action requests create a Droplet actions object, which can be used
+ to get information about the status of an action. Creating a Droplet
+ action is asynchronous: the HTTP call will return the action object before
+ the action has finished processing on the Droplet. The current status of
+ an action can be retrieved from either the Droplet actions endpoint or the
+ global actions endpoint. If a Droplet action is uncompleted it may block
+ the creation of a subsequent action for that Droplet; the locked attribute
+ of the Droplet will be true and attempts to create a Droplet action will
+ fail with a status of 422.
+ """
+ return AsyncActionsResourceWithStreamingResponse(self._gpu_droplets.actions)
+
+ @cached_property
+ def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse:
+ """
+ A [Droplet](https://docs.digitalocean.com/products/droplets/) is a DigitalOcean
+ virtual machine. By sending requests to the Droplet endpoint, you can
+ list, create, or delete Droplets.
+
+ Some of the attributes will have an object value. The `region` and `image`
+ objects will all contain the standard attributes of their associated
+ types. Find more information about each of these objects in their
+ respective sections.
+ """
+ return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse(
+ self._gpu_droplets.destroy_with_associated_resources
+ )
+
+ @cached_property
+ def autoscale(self) -> AsyncAutoscaleResourceWithStreamingResponse:
+ """
+ Droplet autoscale pools manage automatic horizontal scaling for your applications based on resource usage (CPU, memory, or both) or a static configuration.
+ """
+ return AsyncAutoscaleResourceWithStreamingResponse(self._gpu_droplets.autoscale)
+
+ @cached_property
+ def firewalls(self) -> AsyncFirewallsResourceWithStreamingResponse:
+ """
+ [DigitalOcean Cloud Firewalls](https://docs.digitalocean.com/products/networking/firewalls/)
+ provide the ability to restrict network access to and from a Droplet
+ allowing you to define which ports will accept inbound or outbound
+ connections. By sending requests to the `/v2/firewalls` endpoint, you can
+ list, create, or delete firewalls as well as modify access rules.
+ """
+ return AsyncFirewallsResourceWithStreamingResponse(self._gpu_droplets.firewalls)
+
+ @cached_property
+ def floating_ips(self) -> AsyncFloatingIPsResourceWithStreamingResponse:
+ """
+ As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs).
+ The Reserved IP product's endpoints function the exact same way as Floating IPs.
+ The only difference is the name change throughout the URLs and fields.
+ For example, the `floating_ips` field is now the `reserved_ips` field.
+ The Floating IP endpoints will remain active until fall 2023 before being
+ permanently deprecated.
+
+ With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects),
+ we will reflect this change as an additional field in the responses across the API
+ where the `floating_ip` field is used. For example, the Droplet metadata response
+ will contain the field `reserved_ips` in addition to the `floating_ips` field.
+ Floating IPs retrieved using the Projects API will retain the original name.
+
+ [DigitalOcean Floating IPs](https://docs.digitalocean.com/products/networking/reserved-ips/)
+ are publicly-accessible static IP addresses that can be mapped to one of
+ your Droplets. They can be used to create highly available setups or other
+ configurations requiring movable addresses.
+
+ Floating IPs are bound to a specific region.
+ """
+ return AsyncFloatingIPsResourceWithStreamingResponse(self._gpu_droplets.floating_ips)
+
+ @cached_property
+ def images(self) -> AsyncImagesResourceWithStreamingResponse:
+ """
+ A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+ used to create a Droplet and may come in a number of flavors. Currently,
+ there are five types of images: snapshots, backups, applications,
+ distributions, and custom images.
+
+ * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+ a full copy of an existing Droplet instance taken on demand.
+
+ * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+ to snapshots but are created automatically at regular intervals when
+ enabled for a Droplet.
+
+ * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+ are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+ formats are supported) that you may upload for use on DigitalOcean.
+
+ * Distributions are the public Linux distributions that are available to
+ be used as a base to create Droplets.
+
+ * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+ are distributions pre-configured with additional software.
+
+ To interact with images, you will generally send requests to the images
+ endpoint at `/v2/images`.
+ """
+ return AsyncImagesResourceWithStreamingResponse(self._gpu_droplets.images)
+
+ @cached_property
+ def load_balancers(self) -> AsyncLoadBalancersResourceWithStreamingResponse:
+ """
+ [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+ provide a way to distribute traffic across multiple Droplets. By sending
+ requests to the `/v2/load_balancers` endpoint, you can list, create, or
+ delete load balancers as well as add or remove Droplets, forwarding rules,
+ and other configuration details.
+ """
+ return AsyncLoadBalancersResourceWithStreamingResponse(self._gpu_droplets.load_balancers)
+
+ @cached_property
+ def sizes(self) -> AsyncSizesResourceWithStreamingResponse:
+ """
+ The sizes objects represent different packages of hardware resources that
+ can be used for Droplets. When a Droplet is created, a size must be
+ selected so that the correct resources can be allocated.
+
+ Each size represents a plan that bundles together specific sets of
+ resources. This includes the amount of RAM, the number of virtual CPUs,
+ disk space, and transfer. The size object also includes the pricing
+ details and the regions that the size is available in.
+ """
+ return AsyncSizesResourceWithStreamingResponse(self._gpu_droplets.sizes)
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse:
+ """
+ [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
+ instances of a Droplet or a block storage volume, which is reflected in
+ the `resource_type` attribute. In order to avoid problems with compressing
+ filesystems, each defines a `min_disk_size` attribute which is the minimum
+ size of the Droplet or volume disk when creating a new resource from the
+ saved snapshot.
+
+ To interact with snapshots, you will generally send requests to the
+ snapshots endpoint at `/v2/snapshots`.
+ """
+ return AsyncSnapshotsResourceWithStreamingResponse(self._gpu_droplets.snapshots)
+
+ @cached_property
+ def volumes(self) -> AsyncVolumesResourceWithStreamingResponse:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets.
+ """
+ return AsyncVolumesResourceWithStreamingResponse(self._gpu_droplets.volumes)
+
+ @cached_property
+ def account(self) -> AsyncAccountResourceWithStreamingResponse:
+ return AsyncAccountResourceWithStreamingResponse(self._gpu_droplets.account)
diff --git a/src/gradient/resources/gpu_droplets/images/__init__.py b/src/gradient/resources/gpu_droplets/images/__init__.py
new file mode 100644
index 00000000..477fd657
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/images/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .images import (
+ ImagesResource,
+ AsyncImagesResource,
+ ImagesResourceWithRawResponse,
+ AsyncImagesResourceWithRawResponse,
+ ImagesResourceWithStreamingResponse,
+ AsyncImagesResourceWithStreamingResponse,
+)
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "ActionsResource",
+ "AsyncActionsResource",
+ "ActionsResourceWithRawResponse",
+ "AsyncActionsResourceWithRawResponse",
+ "ActionsResourceWithStreamingResponse",
+ "AsyncActionsResourceWithStreamingResponse",
+ "ImagesResource",
+ "AsyncImagesResource",
+ "ImagesResourceWithRawResponse",
+ "AsyncImagesResourceWithRawResponse",
+ "ImagesResourceWithStreamingResponse",
+ "AsyncImagesResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/gpu_droplets/images/actions.py b/src/gradient/resources/gpu_droplets/images/actions.py
new file mode 100644
index 00000000..19c70af8
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/images/actions.py
@@ -0,0 +1,580 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.shared.action import Action
+from ....types.gpu_droplets.images import action_create_params
+from ....types.gpu_droplets.images.action_list_response import ActionListResponse
+
+__all__ = ["ActionsResource", "AsyncActionsResource"]
+
+
+class ActionsResource(SyncAPIResource):
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> ActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ActionsResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ image_id: int,
+ *,
+ type: Literal["convert", "transfer"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ """
+ The following actions are available on an Image.
+
+ ## Convert an Image to a Snapshot
+
+ To convert an image, for example, a backup to a snapshot, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+ ## Transfer an Image
+
+ To transfer an image to another region, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+ `region` attribute to the slug identifier of the region you wish to transfer to.
+
+ Args:
+ type: The action to be taken on the image. Can be either `convert` or `transfer`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ image_id: int,
+ *,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ type: Literal["convert", "transfer"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ """
+ The following actions are available on an Image.
+
+ ## Convert an Image to a Snapshot
+
+ To convert an image, for example, a backup to a snapshot, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+ ## Transfer an Image
+
+ To transfer an image to another region, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+ `region` attribute to the slug identifier of the region you wish to transfer to.
+
+ Args:
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ type: The action to be taken on the image. Can be either `convert` or `transfer`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"], ["region", "type"])
+ def create(
+ self,
+ image_id: int,
+ *,
+ type: Literal["convert", "transfer"],
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ return self._post(
+ f"/v2/images/{image_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}/actions",
+ body=maybe_transform(
+ {
+ "type": type,
+ "region": region,
+ },
+ action_create_params.ActionCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Action,
+ )
+
+ def retrieve(
+ self,
+ action_id: int,
+ *,
+ image_id: int,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ """
+ To retrieve the status of an image action, send a GET request to
+ `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/images/{image_id}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Action,
+ )
+
+ def list(
+ self,
+ image_id: int,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve all actions that have been executed on an image, send a GET request
+ to `/v2/images/$IMAGE_ID/actions`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/images/{image_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionListResponse,
+ )
+
+
+class AsyncActionsResource(AsyncAPIResource):
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncActionsResourceWithStreamingResponse(self)
+
+ @overload
+ async def create(
+ self,
+ image_id: int,
+ *,
+ type: Literal["convert", "transfer"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ """
+ The following actions are available on an Image.
+
+ ## Convert an Image to a Snapshot
+
+ To convert an image, for example, a backup to a snapshot, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+ ## Transfer an Image
+
+ To transfer an image to another region, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+ `region` attribute to the slug identifier of the region you wish to transfer to.
+
+ Args:
+ type: The action to be taken on the image. Can be either `convert` or `transfer`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ image_id: int,
+ *,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ type: Literal["convert", "transfer"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ """
+ The following actions are available on an Image.
+
+ ## Convert an Image to a Snapshot
+
+ To convert an image, for example, a backup to a snapshot, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+ ## Transfer an Image
+
+ To transfer an image to another region, send a POST request to
+ `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+ `region` attribute to the slug identifier of the region you wish to transfer to.
+
+ Args:
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ type: The action to be taken on the image. Can be either `convert` or `transfer`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"], ["region", "type"])
+ async def create(
+ self,
+ image_id: int,
+ *,
+ type: Literal["convert", "transfer"],
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ return await self._post(
+ f"/v2/images/{image_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}/actions",
+ body=await async_maybe_transform(
+ {
+ "type": type,
+ "region": region,
+ },
+ action_create_params.ActionCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Action,
+ )
+
+ async def retrieve(
+ self,
+ action_id: int,
+ *,
+ image_id: int,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Action:
+ """
+ To retrieve the status of an image action, send a GET request to
+ `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/v2/images/{image_id}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Action,
+ )
+
+ async def list(
+ self,
+ image_id: int,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve all actions that have been executed on an image, send a GET request
+ to `/v2/images/$IMAGE_ID/actions`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/v2/images/{image_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ActionListResponse,
+ )
+
+
+class ActionsResourceWithRawResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.create = to_raw_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ actions.list,
+ )
+
+
+class AsyncActionsResourceWithRawResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.create = async_to_raw_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ actions.list,
+ )
+
+
+class ActionsResourceWithStreamingResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.create = to_streamed_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ actions.list,
+ )
+
+
+class AsyncActionsResourceWithStreamingResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.create = async_to_streamed_response_wrapper(
+ actions.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ actions.list,
+ )
diff --git a/src/gradient/resources/gpu_droplets/images/images.py b/src/gradient/resources/gpu_droplets/images/images.py
new file mode 100644
index 00000000..d9a904cf
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/images/images.py
@@ -0,0 +1,975 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets import image_list_params, image_create_params, image_update_params
+from ....types.gpu_droplets.image_list_response import ImageListResponse
+from ....types.gpu_droplets.image_create_response import ImageCreateResponse
+from ....types.gpu_droplets.image_update_response import ImageUpdateResponse
+from ....types.gpu_droplets.image_retrieve_response import ImageRetrieveResponse
+
+__all__ = ["ImagesResource", "AsyncImagesResource"]
+
+
+class ImagesResource(SyncAPIResource):
+ """
+ A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+ used to create a Droplet and may come in a number of flavors. Currently,
+ there are five types of images: snapshots, backups, applications,
+ distributions, and custom images.
+
+ * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+ a full copy of an existing Droplet instance taken on demand.
+
+ * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+ to snapshots but are created automatically at regular intervals when
+ enabled for a Droplet.
+
+ * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+ are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+ formats are supported) that you may upload for use on DigitalOcean.
+
+ * Distributions are the public Linux distributions that are available to
+ be used as a base to create Droplets.
+
+ * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+ are distributions pre-configured with additional software.
+
+ To interact with images, you will generally send requests to the images
+ endpoint at /v2/images.
+ """
+
+ @cached_property
+ def actions(self) -> ActionsResource:
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+ return ActionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ImagesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ImagesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ImagesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ImagesResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ description: str | Omit = omit,
+ distribution: Literal[
+ "Arch Linux",
+ "CentOS",
+ "CoreOS",
+ "Debian",
+ "Fedora",
+ "Fedora Atomic",
+ "FreeBSD",
+ "Gentoo",
+ "openSUSE",
+ "RancherOS",
+ "Rocky Linux",
+ "Ubuntu",
+ "Unknown",
+ ]
+ | Omit = omit,
+ name: str | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ url: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageCreateResponse:
+ """To create a new custom image, send a POST request to /v2/images.
+
+ The body must
+ contain a url attribute pointing to a Linux virtual machine image to be imported
+ into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk
+ format. It may be compressed using gzip or bzip2 and must be smaller than 100 GB
+ after being decompressed.
+
+ Args:
+ description: An optional free-form text field to describe an image.
+
+ distribution: The name of a custom image's distribution. Currently, the valid values are
+ `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`,
+ `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and
+ `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be
+ used in its place.
+
+ name: The display name that has been given to an image. This is what is shown in the
+ control panel and is generally a descriptive title for the image in question.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ url: A URL from which the custom Linux virtual machine image may be retrieved. The
+ image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may
+ be compressed using gzip or bzip2 and must be smaller than 100 GB after being
+ decompressed.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images",
+ body=maybe_transform(
+ {
+ "description": description,
+ "distribution": distribution,
+ "name": name,
+ "region": region,
+ "tags": tags,
+ "url": url,
+ },
+ image_create_params.ImageCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ image_id: Union[int, str],
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageRetrieveResponse:
+ """
+ To retrieve information about an image, send a `GET` request to
+ `/v2/images/$IDENTIFIER`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ f"/v2/images/{image_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageRetrieveResponse,
+ )
+
+ def update(
+ self,
+ image_id: int,
+ *,
+ description: str | Omit = omit,
+ distribution: Literal[
+ "Arch Linux",
+ "CentOS",
+ "CoreOS",
+ "Debian",
+ "Fedora",
+ "Fedora Atomic",
+ "FreeBSD",
+ "Gentoo",
+ "openSUSE",
+ "RancherOS",
+ "Rocky Linux",
+ "Ubuntu",
+ "Unknown",
+ ]
+ | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageUpdateResponse:
+ """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`.
+
+ Set the
+ `name` attribute to the new value you would like to use. For custom images, the
+ `description` and `distribution` attributes may also be updated.
+
+ Args:
+ description: An optional free-form text field to describe an image.
+
+ distribution: The name of a custom image's distribution. Currently, the valid values are
+ `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`,
+ `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and
+ `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be
+ used in its place.
+
+ name: The display name that has been given to an image. This is what is shown in the
+ control panel and is generally a descriptive title for the image in question.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._put(
+ f"/v2/images/{image_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}",
+ body=maybe_transform(
+ {
+ "description": description,
+ "distribution": distribution,
+ "name": name,
+ },
+ image_update_params.ImageUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ private: bool | Omit = omit,
+ tag_name: str | Omit = omit,
+ type: Literal["application", "distribution"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageListResponse:
+ """
+ To list all of the images available on your account, send a GET request to
+ /v2/images.
+
+ ## Filtering Results
+
+ ---
+
+ It's possible to request filtered results by including certain query parameters.
+
+ **Image Type**
+
+ Either 1-Click Application or OS Distribution images can be filtered by using
+ the `type` query parameter.
+
+ > Important: The `type` query parameter does not directly relate to the `type`
+ > attribute.
+
+ To retrieve only **_distribution_** images, include the `type` query parameter
+ set to distribution, `/v2/images?type=distribution`.
+
+ To retrieve only **_application_** images, include the `type` query parameter
+ set to application, `/v2/images?type=application`.
+
+ **User Images**
+
+ To retrieve only the private images of a user, include the `private` query
+ parameter set to true, `/v2/images?private=true`.
+
+ **Tags**
+
+ To list all images assigned to a specific tag, include the `tag_name` query
+ parameter set to the name of the tag in your GET request. For example,
+ `/v2/images?tag_name=$TAG_NAME`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ private: Used to filter only user images.
+
+ tag_name: Used to filter images by a specific tag.
+
+ type: Filters results based on image type which can be either `application` or
+ `distribution`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ "private": private,
+ "tag_name": tag_name,
+ "type": type,
+ },
+ image_list_params.ImageListParams,
+ ),
+ ),
+ cast_to=ImageListResponse,
+ )
+
+ def delete(
+ self,
+ image_id: int,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a snapshot or custom image, send a `DELETE` request to
+ `/v2/images/$IMAGE_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/images/{image_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncImagesResource(AsyncAPIResource):
+ """
+ A DigitalOcean [image](https://docs.digitalocean.com/products/images/) can be
+ used to create a Droplet and may come in a number of flavors. Currently,
+ there are five types of images: snapshots, backups, applications,
+ distributions, and custom images.
+
+ * [Snapshots](https://docs.digitalocean.com/products/snapshots/) provide
+ a full copy of an existing Droplet instance taken on demand.
+
+ * [Backups](https://docs.digitalocean.com/products/backups/) are similar
+ to snapshots but are created automatically at regular intervals when
+ enabled for a Droplet.
+
+ * [Custom images](https://docs.digitalocean.com/products/custom-images/)
+ are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk
+ formats are supported) that you may upload for use on DigitalOcean.
+
+ * Distributions are the public Linux distributions that are available to
+ be used as a base to create Droplets.
+
+ * Applications, or [1-Click Apps](https://docs.digitalocean.com/products/marketplace/),
+ are distributions pre-configured with additional software.
+
+ To interact with images, you will generally send requests to the images
+ endpoint at /v2/images.
+ """
+
+ @cached_property
+ def actions(self) -> AsyncActionsResource:
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+ return AsyncActionsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncImagesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncImagesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncImagesResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ description: str | Omit = omit,
+ distribution: Literal[
+ "Arch Linux",
+ "CentOS",
+ "CoreOS",
+ "Debian",
+ "Fedora",
+ "Fedora Atomic",
+ "FreeBSD",
+ "Gentoo",
+ "openSUSE",
+ "RancherOS",
+ "Rocky Linux",
+ "Ubuntu",
+ "Unknown",
+ ]
+ | Omit = omit,
+ name: str | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ url: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageCreateResponse:
+ """To create a new custom image, send a POST request to /v2/images.
+
+ The body must
+ contain a url attribute pointing to a Linux virtual machine image to be imported
+ into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk
+ format. It may be compressed using gzip or bzip2 and must be smaller than 100 GB
+ after being decompressed.
+
+ Args:
+ description: An optional free-form text field to describe an image.
+
+ distribution: The name of a custom image's distribution. Currently, the valid values are
+ `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`,
+ `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and
+ `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be
+ used in its place.
+
+ name: The display name that has been given to an image. This is what is shown in the
+ control panel and is generally a descriptive title for the image in question.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ url: A URL from which the custom Linux virtual machine image may be retrieved. The
+ image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may
+ be compressed using gzip or bzip2 and must be smaller than 100 GB after being
+ decompressed.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images",
+ body=await async_maybe_transform(
+ {
+ "description": description,
+ "distribution": distribution,
+ "name": name,
+ "region": region,
+ "tags": tags,
+ "url": url,
+ },
+ image_create_params.ImageCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ image_id: Union[int, str],
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageRetrieveResponse:
+ """
+ To retrieve information about an image, send a `GET` request to
+ `/v2/images/$IDENTIFIER`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ f"/v2/images/{image_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ image_id: int,
+ *,
+ description: str | Omit = omit,
+ distribution: Literal[
+ "Arch Linux",
+ "CentOS",
+ "CoreOS",
+ "Debian",
+ "Fedora",
+ "Fedora Atomic",
+ "FreeBSD",
+ "Gentoo",
+ "openSUSE",
+ "RancherOS",
+ "Rocky Linux",
+ "Ubuntu",
+ "Unknown",
+ ]
+ | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageUpdateResponse:
+ """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`.
+
+ Set the
+ `name` attribute to the new value you would like to use. For custom images, the
+ `description` and `distribution` attributes may also be updated.
+
+ Args:
+ description: An optional free-form text field to describe an image.
+
+ distribution: The name of a custom image's distribution. Currently, the valid values are
+ `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`,
+ `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and
+ `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be
+ used in its place.
+
+ name: The display name that has been given to an image. This is what is shown in the
+ control panel and is generally a descriptive title for the image in question.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._put(
+ f"/v2/images/{image_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}",
+ body=await async_maybe_transform(
+ {
+ "description": description,
+ "distribution": distribution,
+ "name": name,
+ },
+ image_update_params.ImageUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ private: bool | Omit = omit,
+ tag_name: str | Omit = omit,
+ type: Literal["application", "distribution"] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageListResponse:
+ """
+ To list all of the images available on your account, send a GET request to
+ /v2/images.
+
+ ## Filtering Results
+
+ ---
+
+ It's possible to request filtered results by including certain query parameters.
+
+ **Image Type**
+
+ Either 1-Click Application or OS Distribution images can be filtered by using
+ the `type` query parameter.
+
+ > Important: The `type` query parameter does not directly relate to the `type`
+ > attribute.
+
+ To retrieve only **_distribution_** images, include the `type` query parameter
+ set to distribution, `/v2/images?type=distribution`.
+
+ To retrieve only **_application_** images, include the `type` query parameter
+ set to application, `/v2/images?type=application`.
+
+ **User Images**
+
+ To retrieve only the private images of a user, include the `private` query
+ parameter set to true, `/v2/images?private=true`.
+
+ **Tags**
+
+ To list all images assigned to a specific tag, include the `tag_name` query
+ parameter set to the name of the tag in your GET request. For example,
+ `/v2/images?tag_name=$TAG_NAME`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ private: Used to filter only user images.
+
+ tag_name: Used to filter images by a specific tag.
+
+ type: Filters results based on image type which can be either `application` or
+ `distribution`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ "private": private,
+ "tag_name": tag_name,
+ "type": type,
+ },
+ image_list_params.ImageListParams,
+ ),
+ ),
+ cast_to=ImageListResponse,
+ )
+
+ async def delete(
+ self,
+ image_id: int,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a snapshot or custom image, send a `DELETE` request to
+ `/v2/images/$IMAGE_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/images/{image_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/images/{image_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class ImagesResourceWithRawResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
+
+ self.create = to_raw_response_wrapper(
+ images.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ images.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ images.update,
+ )
+ self.list = to_raw_response_wrapper(
+ images.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ images.delete,
+ )
+
+ @cached_property
+ def actions(self) -> ActionsResourceWithRawResponse:
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+ return ActionsResourceWithRawResponse(self._images.actions)
+
+
+class AsyncImagesResourceWithRawResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
+
+ self.create = async_to_raw_response_wrapper(
+ images.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ images.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ images.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ images.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ images.delete,
+ )
+
+ @cached_property
+ def actions(self) -> AsyncActionsResourceWithRawResponse:
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+ return AsyncActionsResourceWithRawResponse(self._images.actions)
+
+
+class ImagesResourceWithStreamingResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
+
+ self.create = to_streamed_response_wrapper(
+ images.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ images.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ images.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ images.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ images.delete,
+ )
+
+ @cached_property
+ def actions(self) -> ActionsResourceWithStreamingResponse:
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+ return ActionsResourceWithStreamingResponse(self._images.actions)
+
+
+class AsyncImagesResourceWithStreamingResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
+
+ self.create = async_to_streamed_response_wrapper(
+ images.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ images.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ images.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ images.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ images.delete,
+ )
+
+ @cached_property
+ def actions(self) -> AsyncActionsResourceWithStreamingResponse:
+ """Image actions are commands that can be given to a DigitalOcean image.
+
+ In
+ general, these requests are made on the actions endpoint of a specific
+ image.
+
+ An image action object is returned. These objects hold the current status
+ of the requested action.
+ """
+ return AsyncActionsResourceWithStreamingResponse(self._images.actions)
diff --git a/src/gradient/resources/gpu_droplets/load_balancers/__init__.py b/src/gradient/resources/gpu_droplets/load_balancers/__init__.py
new file mode 100644
index 00000000..2cede1c8
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/load_balancers/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .droplets import (
+ DropletsResource,
+ AsyncDropletsResource,
+ DropletsResourceWithRawResponse,
+ AsyncDropletsResourceWithRawResponse,
+ DropletsResourceWithStreamingResponse,
+ AsyncDropletsResourceWithStreamingResponse,
+)
+from .load_balancers import (
+ LoadBalancersResource,
+ AsyncLoadBalancersResource,
+ LoadBalancersResourceWithRawResponse,
+ AsyncLoadBalancersResourceWithRawResponse,
+ LoadBalancersResourceWithStreamingResponse,
+ AsyncLoadBalancersResourceWithStreamingResponse,
+)
+from .forwarding_rules import (
+ ForwardingRulesResource,
+ AsyncForwardingRulesResource,
+ ForwardingRulesResourceWithRawResponse,
+ AsyncForwardingRulesResourceWithRawResponse,
+ ForwardingRulesResourceWithStreamingResponse,
+ AsyncForwardingRulesResourceWithStreamingResponse,
+)
+
+__all__ = [  # public re-exports for the load_balancers subpackage (sync/async resources plus raw/streaming wrappers)
+    "DropletsResource",
+    "AsyncDropletsResource",
+    "DropletsResourceWithRawResponse",
+    "AsyncDropletsResourceWithRawResponse",
+    "DropletsResourceWithStreamingResponse",
+    "AsyncDropletsResourceWithStreamingResponse",
+    "ForwardingRulesResource",
+    "AsyncForwardingRulesResource",
+    "ForwardingRulesResourceWithRawResponse",
+    "AsyncForwardingRulesResourceWithRawResponse",
+    "ForwardingRulesResourceWithStreamingResponse",
+    "AsyncForwardingRulesResourceWithStreamingResponse",
+    "LoadBalancersResource",
+    "AsyncLoadBalancersResource",
+    "LoadBalancersResourceWithRawResponse",
+    "AsyncLoadBalancersResourceWithRawResponse",
+    "LoadBalancersResourceWithStreamingResponse",
+    "AsyncLoadBalancersResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/gpu_droplets/load_balancers/droplets.py b/src/gradient/resources/gpu_droplets/load_balancers/droplets.py
new file mode 100644
index 00000000..9f4b3e5e
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/load_balancers/droplets.py
@@ -0,0 +1,318 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+
+import httpx
+
+from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.load_balancers import droplet_add_params, droplet_remove_params
+
+__all__ = ["DropletsResource", "AsyncDropletsResource"]
+
+
+class DropletsResource(SyncAPIResource):
+    """
+    [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+    provide a way to distribute traffic across multiple Droplets. By sending
+    requests to the `/v2/load_balancers` endpoint, you can list, create, or
+    delete load balancers as well as add or remove Droplets, forwarding rules,
+    and other configuration details.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> DropletsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return DropletsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> DropletsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return DropletsResourceWithStreamingResponse(self)
+
+    def add(
+        self,
+        lb_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To assign a Droplet to a load balancer instance, send a POST request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request,
+        there should be a `droplet_ids` attribute containing a list of Droplet IDs.
+        Individual Droplets cannot be added to a load balancer configured with a
+        Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity"
+        response from the API.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return self._post(
+            f"/v2/load_balancers/{lb_id}/droplets"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets",
+            body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    def remove(
+        self,
+        lb_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To remove a Droplet from a load balancer instance, send a DELETE request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request,
+        there should be a `droplet_ids` attribute containing a list of Droplet IDs.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return self._delete(
+            f"/v2/load_balancers/{lb_id}/droplets"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets",
+            body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class AsyncDropletsResource(AsyncAPIResource):
+    """
+    [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+    provide a way to distribute traffic across multiple Droplets. By sending
+    requests to the `/v2/load_balancers` endpoint, you can list, create, or
+    delete load balancers as well as add or remove Droplets, forwarding rules,
+    and other configuration details.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncDropletsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncDropletsResourceWithStreamingResponse(self)
+
+    async def add(
+        self,
+        lb_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To assign a Droplet to a load balancer instance, send a POST request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request,
+        there should be a `droplet_ids` attribute containing a list of Droplet IDs.
+        Individual Droplets cannot be added to a load balancer configured with a
+        Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity"
+        response from the API.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return await self._post(
+            f"/v2/load_balancers/{lb_id}/droplets"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets",
+            body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def remove(
+        self,
+        lb_id: str,
+        *,
+        droplet_ids: Iterable[int],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To remove a Droplet from a load balancer instance, send a DELETE request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request,
+        there should be a `droplet_ids` attribute containing a list of Droplet IDs.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return await self._delete(
+            f"/v2/load_balancers/{lb_id}/droplets"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets",
+            body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class DropletsResourceWithRawResponse:  # exposes each method so it returns the raw HTTP response
+    def __init__(self, droplets: DropletsResource) -> None:
+        self._droplets = droplets  # underlying resource the wrappers delegate to
+
+        self.add = to_raw_response_wrapper(
+            droplets.add,
+        )
+        self.remove = to_raw_response_wrapper(
+            droplets.remove,
+        )
+
+
+class AsyncDropletsResourceWithRawResponse:  # async variant returning raw HTTP responses
+    def __init__(self, droplets: AsyncDropletsResource) -> None:
+        self._droplets = droplets  # underlying resource the wrappers delegate to
+
+        self.add = async_to_raw_response_wrapper(
+            droplets.add,
+        )
+        self.remove = async_to_raw_response_wrapper(
+            droplets.remove,
+        )
+
+
+class DropletsResourceWithStreamingResponse:  # wrappers that defer reading the response body
+    def __init__(self, droplets: DropletsResource) -> None:
+        self._droplets = droplets  # underlying resource the wrappers delegate to
+
+        self.add = to_streamed_response_wrapper(
+            droplets.add,
+        )
+        self.remove = to_streamed_response_wrapper(
+            droplets.remove,
+        )
+
+
+class AsyncDropletsResourceWithStreamingResponse:  # async variant with deferred body reads
+    def __init__(self, droplets: AsyncDropletsResource) -> None:
+        self._droplets = droplets  # underlying resource the wrappers delegate to
+
+        self.add = async_to_streamed_response_wrapper(
+            droplets.add,
+        )
+        self.remove = async_to_streamed_response_wrapper(
+            droplets.remove,
+        )
diff --git a/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py
new file mode 100644
index 00000000..51842e8c
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py
@@ -0,0 +1,317 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+
+import httpx
+
+from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.load_balancers import forwarding_rule_add_params, forwarding_rule_remove_params
+from ....types.gpu_droplets.forwarding_rule_param import ForwardingRuleParam
+
+__all__ = ["ForwardingRulesResource", "AsyncForwardingRulesResource"]
+
+
+class ForwardingRulesResource(SyncAPIResource):
+    """
+    [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+    provide a way to distribute traffic across multiple Droplets. By sending
+    requests to the `/v2/load_balancers` endpoint, you can list, create, or
+    delete load balancers as well as add or remove Droplets, forwarding rules,
+    and other configuration details.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> ForwardingRulesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return ForwardingRulesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ForwardingRulesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return ForwardingRulesResourceWithStreamingResponse(self)
+
+    def add(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],  # rules to append to the load balancer
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To add an additional forwarding rule to a load balancer instance, send a POST
+        request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body
+        of the request, there should be a `forwarding_rules` attribute containing an
+        array of rules to be added.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return self._post(
+            f"/v2/load_balancers/{lb_id}/forwarding_rules"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules",
+            body=maybe_transform(
+                {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    def remove(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],  # rules to delete from the load balancer
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To remove forwarding rules from a load balancer instance, send a DELETE request
+        to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the
+        request, there should be a `forwarding_rules` attribute containing an array of
+        rules to be removed.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return self._delete(
+            f"/v2/load_balancers/{lb_id}/forwarding_rules"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules",
+            body=maybe_transform(
+                {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class AsyncForwardingRulesResource(AsyncAPIResource):
+    """
+    [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+    provide a way to distribute traffic across multiple Droplets. By sending
+    requests to the `/v2/load_balancers` endpoint, you can list, create, or
+    delete load balancers as well as add or remove Droplets, forwarding rules,
+    and other configuration details.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> AsyncForwardingRulesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncForwardingRulesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncForwardingRulesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncForwardingRulesResourceWithStreamingResponse(self)
+
+    async def add(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],  # rules to append to the load balancer
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To add an additional forwarding rule to a load balancer instance, send a POST
+        request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body
+        of the request, there should be a `forwarding_rules` attribute containing an
+        array of rules to be added.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return await self._post(
+            f"/v2/load_balancers/{lb_id}/forwarding_rules"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules",
+            body=await async_maybe_transform(
+                {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def remove(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],  # rules to delete from the load balancer
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To remove forwarding rules from a load balancer instance, send a DELETE request
+        to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the
+        request, there should be a `forwarding_rules` attribute containing an array of
+        rules to be removed.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}  # endpoint responds 204 with no body
+        return await self._delete(
+            f"/v2/load_balancers/{lb_id}/forwarding_rules"
+            if self._client._base_url_overridden  # relative path lets a custom base_url take effect
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules",
+            body=await async_maybe_transform(
+                {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class ForwardingRulesResourceWithRawResponse:  # exposes each method so it returns the raw HTTP response
+    def __init__(self, forwarding_rules: ForwardingRulesResource) -> None:
+        self._forwarding_rules = forwarding_rules  # underlying resource the wrappers delegate to
+
+        self.add = to_raw_response_wrapper(
+            forwarding_rules.add,
+        )
+        self.remove = to_raw_response_wrapper(
+            forwarding_rules.remove,
+        )
+
+
+class AsyncForwardingRulesResourceWithRawResponse:  # async variant returning raw HTTP responses
+    def __init__(self, forwarding_rules: AsyncForwardingRulesResource) -> None:
+        self._forwarding_rules = forwarding_rules  # underlying resource the wrappers delegate to
+
+        self.add = async_to_raw_response_wrapper(
+            forwarding_rules.add,
+        )
+        self.remove = async_to_raw_response_wrapper(
+            forwarding_rules.remove,
+        )
+
+
+class ForwardingRulesResourceWithStreamingResponse:  # wrappers that defer reading the response body
+    def __init__(self, forwarding_rules: ForwardingRulesResource) -> None:
+        self._forwarding_rules = forwarding_rules  # underlying resource the wrappers delegate to
+
+        self.add = to_streamed_response_wrapper(
+            forwarding_rules.add,
+        )
+        self.remove = to_streamed_response_wrapper(
+            forwarding_rules.remove,
+        )
+
+
+class AsyncForwardingRulesResourceWithStreamingResponse:  # async variant with deferred body reads
+    def __init__(self, forwarding_rules: AsyncForwardingRulesResource) -> None:
+        self._forwarding_rules = forwarding_rules  # underlying resource the wrappers delegate to
+
+        self.add = async_to_streamed_response_wrapper(
+            forwarding_rules.add,
+        )
+        self.remove = async_to_streamed_response_wrapper(
+            forwarding_rules.remove,
+        )
diff --git a/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py
new file mode 100644
index 00000000..1316036b
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py
@@ -0,0 +1,2305 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, overload
+
+import httpx
+
+from .droplets import (
+ DropletsResource,
+ AsyncDropletsResource,
+ DropletsResourceWithRawResponse,
+ AsyncDropletsResourceWithRawResponse,
+ DropletsResourceWithStreamingResponse,
+ AsyncDropletsResourceWithStreamingResponse,
+)
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from .forwarding_rules import (
+ ForwardingRulesResource,
+ AsyncForwardingRulesResource,
+ ForwardingRulesResourceWithRawResponse,
+ AsyncForwardingRulesResourceWithRawResponse,
+ ForwardingRulesResourceWithStreamingResponse,
+ AsyncForwardingRulesResourceWithStreamingResponse,
+)
+from ....types.gpu_droplets import (
+ load_balancer_list_params,
+ load_balancer_create_params,
+ load_balancer_update_params,
+)
+from ....types.gpu_droplets.domains_param import DomainsParam
+from ....types.gpu_droplets.lb_firewall_param import LbFirewallParam
+from ....types.gpu_droplets.glb_settings_param import GlbSettingsParam
+from ....types.gpu_droplets.health_check_param import HealthCheckParam
+from ....types.gpu_droplets.forwarding_rule_param import ForwardingRuleParam
+from ....types.gpu_droplets.sticky_sessions_param import StickySessionsParam
+from ....types.gpu_droplets.load_balancer_list_response import LoadBalancerListResponse
+from ....types.gpu_droplets.load_balancer_create_response import LoadBalancerCreateResponse
+from ....types.gpu_droplets.load_balancer_update_response import LoadBalancerUpdateResponse
+from ....types.gpu_droplets.load_balancer_retrieve_response import LoadBalancerRetrieveResponse
+
+__all__ = ["LoadBalancersResource", "AsyncLoadBalancersResource"]
+
+
+class LoadBalancersResource(SyncAPIResource):
+ """
+ [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+ provide a way to distribute traffic across multiple Droplets. By sending
+ requests to the `/v2/load_balancers` endpoint, you can list, create, or
+ delete load balancers as well as add or remove Droplets, forwarding rules,
+ and other configuration details.
+ """
+
+    @cached_property
+    def droplets(self) -> DropletsResource:
+        """
+        Sub-resource for managing the Droplets assigned to a load balancer
+        (add/remove backends). Constructed once per client and cached by
+        `@cached_property`.
+        """
+        return DropletsResource(self._client)
+
+    @cached_property
+    def forwarding_rules(self) -> ForwardingRulesResource:
+        """
+        Sub-resource for managing a load balancer's forwarding rules
+        (add/remove rules). Constructed once per client and cached by
+        `@cached_property`.
+        """
+        return ForwardingRulesResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> LoadBalancersResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        # Cached so repeated access reuses the same wrapper instance.
+        return LoadBalancersResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> LoadBalancersResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        # Cached so repeated access reuses the same wrapper instance.
+        return LoadBalancersResourceWithStreamingResponse(self)
+
+    @overload
+    # Overload: backends are specified via explicit `droplet_ids`; mutually
+    # exclusive with the `tag`-based overload below.
+    def create(
+        self,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        droplet_ids: Iterable[int] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerCreateResponse:
+        """
+        To create a new load balancer instance, send a POST request to
+        `/v2/load_balancers`.
+
+        You can specify the Droplets that will sit behind the load balancer using one of
+        two methods:
+
+        - Set `droplet_ids` to a list of specific Droplet IDs.
+        - Set `tag` to the name of a tag. All Droplets with this tag applied will be
+          assigned to the load balancer. Additional Droplets will be automatically
+          assigned as they are tagged.
+
+        These methods are mutually exclusive.
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+
+          domains: An array of objects specifying the domain configurations for a Global load
+              balancer.
+
+          droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+          enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+              target Droplets.
+
+          enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+          firewall: An object specifying allow and deny rules to control traffic to the load
+              balancer.
+
+          glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+          health_check: An object specifying health check settings for the load balancer.
+
+          http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+              target droplets.
+
+          name: A human-readable name for a load balancer instance.
+
+          network: A string indicating whether the load balancer should be external or internal.
+              Internal load balancers have no public IPs and are only accessible to resources
+              on the same VPC network. This property cannot be updated after creating the load
+              balancer.
+
+          network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+              IPv6 networking. This property cannot be updated after creating the load
+              balancer.
+
+          project_id: The ID of the project that the load balancer is associated with. If no ID is
+              provided at creation, the load balancer associates with the user's default
+              project. If an invalid project ID is provided, the load balancer will not be
+              created.
+
+          redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+              will be redirected to HTTPS on port 443.
+
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          size: This field has been replaced by the `size_unit` field for all regions except in
+              AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+              balancer having a set number of nodes.
+
+              - `lb-small` = 1 node
+              - `lb-medium` = 3 nodes
+              - `lb-large` = 6 nodes
+
+              You can resize load balancers after creation up to once per hour. You cannot
+              resize a load balancer within the first hour of its creation.
+
+          size_unit: How many nodes the load balancer contains. Each additional node increases the
+              load balancer's ability to manage more connections. Load balancers can be scaled
+              up or down, and you can change the number of nodes after creation up to once per
+              hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+              Use the `size` field to scale load balancers that reside in these regions.
+
+          sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+          target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+              target backends for a Global load balancer.
+
+          tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+              balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    # Overload: backends are selected by Droplet `tag`; mutually exclusive
+    # with the `droplet_ids`-based overload above.
+    def create(
+        self,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        tag: str | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerCreateResponse:
+        """
+        To create a new load balancer instance, send a POST request to
+        `/v2/load_balancers`.
+
+        You can specify the Droplets that will sit behind the load balancer using one of
+        two methods:
+
+        - Set `droplet_ids` to a list of specific Droplet IDs.
+        - Set `tag` to the name of a tag. All Droplets with this tag applied will be
+          assigned to the load balancer. Additional Droplets will be automatically
+          assigned as they are tagged.
+
+        These methods are mutually exclusive.
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+
+          domains: An array of objects specifying the domain configurations for a Global load
+              balancer.
+
+          enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+              target Droplets.
+
+          enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+          firewall: An object specifying allow and deny rules to control traffic to the load
+              balancer.
+
+          glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+          health_check: An object specifying health check settings for the load balancer.
+
+          http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+              target droplets.
+
+          name: A human-readable name for a load balancer instance.
+
+          network: A string indicating whether the load balancer should be external or internal.
+              Internal load balancers have no public IPs and are only accessible to resources
+              on the same VPC network. This property cannot be updated after creating the load
+              balancer.
+
+          network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+              IPv6 networking. This property cannot be updated after creating the load
+              balancer.
+
+          project_id: The ID of the project that the load balancer is associated with. If no ID is
+              provided at creation, the load balancer associates with the user's default
+              project. If an invalid project ID is provided, the load balancer will not be
+              created.
+
+          redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+              will be redirected to HTTPS on port 443.
+
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          size: This field has been replaced by the `size_unit` field for all regions except in
+              AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+              balancer having a set number of nodes.
+
+              - `lb-small` = 1 node
+              - `lb-medium` = 3 nodes
+              - `lb-large` = 6 nodes
+
+              You can resize load balancers after creation up to once per hour. You cannot
+              resize a load balancer within the first hour of its creation.
+
+          size_unit: How many nodes the load balancer contains. Each additional node increases the
+              load balancer's ability to manage more connections. Load balancers can be scaled
+              up or down, and you can change the number of nodes after creation up to once per
+              hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+              Use the `size` field to scale load balancers that reside in these regions.
+
+          sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+          tag: The name of a Droplet tag corresponding to Droplets assigned to the load
+              balancer.
+
+          target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+              target backends for a Global load balancer.
+
+          tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+              balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["forwarding_rules"])
+    def create(
+        self,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        droplet_ids: Iterable[int] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        tag: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerCreateResponse:
+        # Shared implementation behind the two @overload variants above; only
+        # `forwarding_rules` is required at runtime (enforced by @required_args).
+        # The request targets the public DigitalOcean API unless the client's
+        # base URL was overridden, in which case a relative path is used.
+        return self._post(
+            "/v2/load_balancers"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/load_balancers",
+            body=maybe_transform(
+                {
+                    "forwarding_rules": forwarding_rules,
+                    "algorithm": algorithm,
+                    "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records,
+                    "domains": domains,
+                    "droplet_ids": droplet_ids,
+                    "enable_backend_keepalive": enable_backend_keepalive,
+                    "enable_proxy_protocol": enable_proxy_protocol,
+                    "firewall": firewall,
+                    "glb_settings": glb_settings,
+                    "health_check": health_check,
+                    "http_idle_timeout_seconds": http_idle_timeout_seconds,
+                    "name": name,
+                    "network": network,
+                    "network_stack": network_stack,
+                    "project_id": project_id,
+                    "redirect_http_to_https": redirect_http_to_https,
+                    "region": region,
+                    "size": size,
+                    "size_unit": size_unit,
+                    "sticky_sessions": sticky_sessions,
+                    "target_load_balancer_ids": target_load_balancer_ids,
+                    "tls_cipher_policy": tls_cipher_policy,
+                    "type": type,
+                    "vpc_uuid": vpc_uuid,
+                    "tag": tag,
+                },
+                load_balancer_create_params.LoadBalancerCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=LoadBalancerCreateResponse,
+        )
+
+    def retrieve(
+        self,
+        lb_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerRetrieveResponse:
+        """
+        To show information about a load balancer instance, send a GET request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Guard against an empty ID, which would otherwise silently request
+        # the collection URL instead of a specific load balancer.
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        # Target the public DigitalOcean API unless the client's base URL was
+        # overridden, in which case a relative path is used.
+        return self._get(
+            f"/v2/load_balancers/{lb_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=LoadBalancerRetrieveResponse,
+        )
+
+    @overload
+    # Overload: backends are specified via explicit `droplet_ids`; mutually
+    # exclusive with the `tag`-based overload below.
+    def update(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        droplet_ids: Iterable[int] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerUpdateResponse:
+        """
+        To update a load balancer's settings, send a PUT request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+        representation of the load balancer including existing attributes. It may
+        contain _one of_ the `droplet_ids` or `tag` attributes as they are mutually
+        exclusive. **Note that any attribute that is not provided will be reset to its
+        default value.**
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+
+          domains: An array of objects specifying the domain configurations for a Global load
+              balancer.
+
+          droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+          enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+              target Droplets.
+
+          enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+          firewall: An object specifying allow and deny rules to control traffic to the load
+              balancer.
+
+          glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+          health_check: An object specifying health check settings for the load balancer.
+
+          http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+              target droplets.
+
+          name: A human-readable name for a load balancer instance.
+
+          network: A string indicating whether the load balancer should be external or internal.
+              Internal load balancers have no public IPs and are only accessible to resources
+              on the same VPC network. This property cannot be updated after creating the load
+              balancer.
+
+          network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+              IPv6 networking. This property cannot be updated after creating the load
+              balancer.
+
+          project_id: The ID of the project that the load balancer is associated with. If no ID is
+              provided at creation, the load balancer associates with the user's default
+              project. If an invalid project ID is provided, the load balancer will not be
+              created.
+
+          redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+              will be redirected to HTTPS on port 443.
+
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          size: This field has been replaced by the `size_unit` field for all regions except in
+              AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+              balancer having a set number of nodes.
+
+              - `lb-small` = 1 node
+              - `lb-medium` = 3 nodes
+              - `lb-large` = 6 nodes
+
+              You can resize load balancers after creation up to once per hour. You cannot
+              resize a load balancer within the first hour of its creation.
+
+          size_unit: How many nodes the load balancer contains. Each additional node increases the
+              load balancer's ability to manage more connections. Load balancers can be scaled
+              up or down, and you can change the number of nodes after creation up to once per
+              hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+              Use the `size` field to scale load balancers that reside in these regions.
+
+          sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+          target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+              target backends for a Global load balancer.
+
+          tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+              balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+ @overload
+ def update(
+ self,
+ lb_id: str,
+ *,
+ forwarding_rules: Iterable[ForwardingRuleParam],
+ algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+ disable_lets_encrypt_dns_records: bool | Omit = omit,
+ domains: Iterable[DomainsParam] | Omit = omit,
+ enable_backend_keepalive: bool | Omit = omit,
+ enable_proxy_protocol: bool | Omit = omit,
+ firewall: LbFirewallParam | Omit = omit,
+ glb_settings: GlbSettingsParam | Omit = omit,
+ health_check: HealthCheckParam | Omit = omit,
+ http_idle_timeout_seconds: int | Omit = omit,
+ name: str | Omit = omit,
+ network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+ network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+ project_id: str | Omit = omit,
+ redirect_http_to_https: bool | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+ size_unit: int | Omit = omit,
+ sticky_sessions: StickySessionsParam | Omit = omit,
+ tag: str | Omit = omit,
+ target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+ vpc_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> LoadBalancerUpdateResponse:
+ """
+ To update a load balancer's settings, send a PUT request to
+ `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+ representation of the load balancer including existing attributes. It may
+ contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually
+ exclusive. **Note that any attribute that is not provided will be reset to its
+ default value.**
+
+ Args:
+ forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+ algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+ balancers.
+
+ disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+ Let's Encrypt certificates that are added to the load balancer.
+
+ domains: An array of objects specifying the domain configurations for a Global load
+ balancer.
+
+ enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+ target Droplets.
+
+ enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+ firewall: An object specifying allow and deny rules to control traffic to the load
+ balancer.
+
+ glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+ health_check: An object specifying health check settings for the load balancer.
+
+ http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+ target droplets.
+
+ name: A human-readable name for a load balancer instance.
+
+ network: A string indicating whether the load balancer should be external or internal.
+ Internal load balancers have no public IPs and are only accessible to resources
+ on the same VPC network. This property cannot be updated after creating the load
+ balancer.
+
+ network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+ IPv6 networking. This property cannot be updated after creating the load
+ balancer.
+
+ project_id: The ID of the project that the load balancer is associated with. If no ID is
+ provided at creation, the load balancer associates with the user's default
+ project. If an invalid project ID is provided, the load balancer will not be
+ created.
+
+ redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+ will be redirected to HTTPS on port 443.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ size: This field has been replaced by the `size_unit` field for all regions except in
+ AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+ balancer having a set number of nodes.
+
+ - `lb-small` = 1 node
+ - `lb-medium` = 3 nodes
+ - `lb-large` = 6 nodes
+
+ You can resize load balancers after creation up to once per hour. You cannot
+ resize a load balancer within the first hour of its creation.
+
+ size_unit: How many nodes the load balancer contains. Each additional node increases the
+ load balancer's ability to manage more connections. Load balancers can be scaled
+ up or down, and you can change the number of nodes after creation up to once per
+ hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+ Use the `size` field to scale load balancers that reside in these regions.
+
+ sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+ tag: The name of a Droplet tag corresponding to Droplets assigned to the load
+ balancer.
+
+ target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+ target backends for a Global load balancer.
+
+ tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+ balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+ `DEFAULT`.
+
+ type: A string indicating whether the load balancer should be a standard regional HTTP
+ load balancer, a regional network load balancer that routes traffic at the
+ TCP/UDP transport layer, or a global load balancer.
+
+ vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["forwarding_rules"])
+ def update(
+ self,
+ lb_id: str,
+ *,
+ forwarding_rules: Iterable[ForwardingRuleParam],
+ algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+ disable_lets_encrypt_dns_records: bool | Omit = omit,
+ domains: Iterable[DomainsParam] | Omit = omit,
+ droplet_ids: Iterable[int] | Omit = omit,
+ enable_backend_keepalive: bool | Omit = omit,
+ enable_proxy_protocol: bool | Omit = omit,
+ firewall: LbFirewallParam | Omit = omit,
+ glb_settings: GlbSettingsParam | Omit = omit,
+ health_check: HealthCheckParam | Omit = omit,
+ http_idle_timeout_seconds: int | Omit = omit,
+ name: str | Omit = omit,
+ network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+ network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+ project_id: str | Omit = omit,
+ redirect_http_to_https: bool | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+ size_unit: int | Omit = omit,
+ sticky_sessions: StickySessionsParam | Omit = omit,
+ target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+ vpc_uuid: str | Omit = omit,
+ tag: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> LoadBalancerUpdateResponse:
+ if not lb_id:
+ raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+ return self._put(
+ f"/v2/load_balancers/{lb_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+ body=maybe_transform(
+ {
+ "forwarding_rules": forwarding_rules,
+ "algorithm": algorithm,
+ "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records,
+ "domains": domains,
+ "droplet_ids": droplet_ids,
+ "enable_backend_keepalive": enable_backend_keepalive,
+ "enable_proxy_protocol": enable_proxy_protocol,
+ "firewall": firewall,
+ "glb_settings": glb_settings,
+ "health_check": health_check,
+ "http_idle_timeout_seconds": http_idle_timeout_seconds,
+ "name": name,
+ "network": network,
+ "network_stack": network_stack,
+ "project_id": project_id,
+ "redirect_http_to_https": redirect_http_to_https,
+ "region": region,
+ "size": size,
+ "size_unit": size_unit,
+ "sticky_sessions": sticky_sessions,
+ "target_load_balancer_ids": target_load_balancer_ids,
+ "tls_cipher_policy": tls_cipher_policy,
+ "type": type,
+ "vpc_uuid": vpc_uuid,
+ "tag": tag,
+ },
+ load_balancer_update_params.LoadBalancerUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=LoadBalancerUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> LoadBalancerListResponse:
+ """
+ To list all of the load balancer instances on your account, send a GET request
+ to `/v2/load_balancers`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/load_balancers"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/load_balancers",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ load_balancer_list_params.LoadBalancerListParams,
+ ),
+ ),
+ cast_to=LoadBalancerListResponse,
+ )
+
+ def delete(
+ self,
+ lb_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a load balancer instance, disassociating any Droplets assigned to it
+ and removing it from your account, send a DELETE request to
+ `/v2/load_balancers/$LOAD_BALANCER_ID`.
+
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not lb_id:
+ raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/load_balancers/{lb_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def delete_cache(
+ self,
+ lb_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a Global load balancer CDN cache, send a DELETE request to
+ `/v2/load_balancers/$LOAD_BALANCER_ID/cache`.
+
+ A successful request will receive a 204 status code with no body in response.
+ This indicates that the request was processed successfully.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not lb_id:
+ raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/load_balancers/{lb_id}/cache"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncLoadBalancersResource(AsyncAPIResource):
+ """
+ [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+ provide a way to distribute traffic across multiple Droplets. By sending
+ requests to the `/v2/load_balancers` endpoint, you can list, create, or
+ delete load balancers as well as add or remove Droplets, forwarding rules,
+ and other configuration details.
+ """
+
+ @cached_property
+ def droplets(self) -> AsyncDropletsResource:
+ """
+ [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+ provide a way to distribute traffic across multiple Droplets. By sending
+ requests to the `/v2/load_balancers` endpoint, you can list, create, or
+ delete load balancers as well as add or remove Droplets, forwarding rules,
+ and other configuration details.
+ """
+ return AsyncDropletsResource(self._client)
+
+ @cached_property
+ def forwarding_rules(self) -> AsyncForwardingRulesResource:
+ """
+ [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+ provide a way to distribute traffic across multiple Droplets. By sending
+ requests to the `/v2/load_balancers` endpoint, you can list, create, or
+ delete load balancers as well as add or remove Droplets, forwarding rules,
+ and other configuration details.
+ """
+ return AsyncForwardingRulesResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncLoadBalancersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncLoadBalancersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncLoadBalancersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncLoadBalancersResourceWithStreamingResponse(self)
+
+    @overload
+    # Overload for the `droplet_ids` variant; `tag` is only accepted by the
+    # other overload, as the two attributes are mutually exclusive.
+    async def create(
+        self,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        droplet_ids: Iterable[int] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerCreateResponse:
+        """
+        To create a new load balancer instance, send a POST request to
+        `/v2/load_balancers`.
+
+        You can specify the Droplets that will sit behind the load balancer using one of
+        two methods:
+
+        - Set `droplet_ids` to a list of specific Droplet IDs.
+        - Set `tag` to the name of a tag. All Droplets with this tag applied will be
+          assigned to the load balancer. Additional Droplets will be automatically
+          assigned as they are tagged.
+
+        These methods are mutually exclusive.
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+
+          domains: An array of objects specifying the domain configurations for a Global load
+              balancer.
+
+          droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+          enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+              target Droplets.
+
+          enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+          firewall: An object specifying allow and deny rules to control traffic to the load
+              balancer.
+
+          glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+          health_check: An object specifying health check settings for the load balancer.
+
+          http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+              target droplets.
+
+          name: A human-readable name for a load balancer instance.
+
+          network: A string indicating whether the load balancer should be external or internal.
+              Internal load balancers have no public IPs and are only accessible to resources
+              on the same VPC network. This property cannot be updated after creating the load
+              balancer.
+
+          network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+              IPv6 networking. This property cannot be updated after creating the load
+              balancer.
+
+          project_id: The ID of the project that the load balancer is associated with. If no ID is
+              provided at creation, the load balancer associates with the user's default
+              project. If an invalid project ID is provided, the load balancer will not be
+              created.
+
+          redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+              will be redirected to HTTPS on port 443.
+
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          size: This field has been replaced by the `size_unit` field for all regions except in
+              AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+              balancer having a set number of nodes.
+
+              - `lb-small` = 1 node
+              - `lb-medium` = 3 nodes
+              - `lb-large` = 6 nodes
+
+              You can resize load balancers after creation up to once per hour. You cannot
+              resize a load balancer within the first hour of its creation.
+
+          size_unit: How many nodes the load balancer contains. Each additional node increases the
+              load balancer's ability to manage more connections. Load balancers can be scaled
+              up or down, and you can change the number of nodes after creation up to once per
+              hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+              Use the `size` field to scale load balancers that reside in these regions.
+
+          sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+          target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+              target backends for a Global load balancer.
+
+          tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+              balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    # Overload for the `tag` variant; `droplet_ids` is only accepted by the
+    # other overload, as the two attributes are mutually exclusive.
+    async def create(
+        self,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        tag: str | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerCreateResponse:
+        """
+        To create a new load balancer instance, send a POST request to
+        `/v2/load_balancers`.
+
+        You can specify the Droplets that will sit behind the load balancer using one of
+        two methods:
+
+        - Set `droplet_ids` to a list of specific Droplet IDs.
+        - Set `tag` to the name of a tag. All Droplets with this tag applied will be
+          assigned to the load balancer. Additional Droplets will be automatically
+          assigned as they are tagged.
+
+        These methods are mutually exclusive.
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+
+          domains: An array of objects specifying the domain configurations for a Global load
+              balancer.
+
+          enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+              target Droplets.
+
+          enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+          firewall: An object specifying allow and deny rules to control traffic to the load
+              balancer.
+
+          glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+          health_check: An object specifying health check settings for the load balancer.
+
+          http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+              target droplets.
+
+          name: A human-readable name for a load balancer instance.
+
+          network: A string indicating whether the load balancer should be external or internal.
+              Internal load balancers have no public IPs and are only accessible to resources
+              on the same VPC network. This property cannot be updated after creating the load
+              balancer.
+
+          network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+              IPv6 networking. This property cannot be updated after creating the load
+              balancer.
+
+          project_id: The ID of the project that the load balancer is associated with. If no ID is
+              provided at creation, the load balancer associates with the user's default
+              project. If an invalid project ID is provided, the load balancer will not be
+              created.
+
+          redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+              will be redirected to HTTPS on port 443.
+
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          size: This field has been replaced by the `size_unit` field for all regions except in
+              AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+              balancer having a set number of nodes.
+
+              - `lb-small` = 1 node
+              - `lb-medium` = 3 nodes
+              - `lb-large` = 6 nodes
+
+              You can resize load balancers after creation up to once per hour. You cannot
+              resize a load balancer within the first hour of its creation.
+
+          size_unit: How many nodes the load balancer contains. Each additional node increases the
+              load balancer's ability to manage more connections. Load balancers can be scaled
+              up or down, and you can change the number of nodes after creation up to once per
+              hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+              Use the `size` field to scale load balancers that reside in these regions.
+
+          sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+          tag: The name of a Droplet tag corresponding to Droplets assigned to the load
+              balancer.
+
+          target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+              target backends for a Global load balancer.
+
+          tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+              balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+ @required_args(["forwarding_rules"])
+ async def create(
+ self,
+ *,
+ forwarding_rules: Iterable[ForwardingRuleParam],
+ algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+ disable_lets_encrypt_dns_records: bool | Omit = omit,
+ domains: Iterable[DomainsParam] | Omit = omit,
+ droplet_ids: Iterable[int] | Omit = omit,
+ enable_backend_keepalive: bool | Omit = omit,
+ enable_proxy_protocol: bool | Omit = omit,
+ firewall: LbFirewallParam | Omit = omit,
+ glb_settings: GlbSettingsParam | Omit = omit,
+ health_check: HealthCheckParam | Omit = omit,
+ http_idle_timeout_seconds: int | Omit = omit,
+ name: str | Omit = omit,
+ network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+ network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+ project_id: str | Omit = omit,
+ redirect_http_to_https: bool | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+ size_unit: int | Omit = omit,
+ sticky_sessions: StickySessionsParam | Omit = omit,
+ target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+ vpc_uuid: str | Omit = omit,
+ tag: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> LoadBalancerCreateResponse:
+ return await self._post(
+ "/v2/load_balancers"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/load_balancers",
+ body=await async_maybe_transform(
+ {
+ "forwarding_rules": forwarding_rules,
+ "algorithm": algorithm,
+ "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records,
+ "domains": domains,
+ "droplet_ids": droplet_ids,
+ "enable_backend_keepalive": enable_backend_keepalive,
+ "enable_proxy_protocol": enable_proxy_protocol,
+ "firewall": firewall,
+ "glb_settings": glb_settings,
+ "health_check": health_check,
+ "http_idle_timeout_seconds": http_idle_timeout_seconds,
+ "name": name,
+ "network": network,
+ "network_stack": network_stack,
+ "project_id": project_id,
+ "redirect_http_to_https": redirect_http_to_https,
+ "region": region,
+ "size": size,
+ "size_unit": size_unit,
+ "sticky_sessions": sticky_sessions,
+ "target_load_balancer_ids": target_load_balancer_ids,
+ "tls_cipher_policy": tls_cipher_policy,
+ "type": type,
+ "vpc_uuid": vpc_uuid,
+ "tag": tag,
+ },
+ load_balancer_create_params.LoadBalancerCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=LoadBalancerCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ lb_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> LoadBalancerRetrieveResponse:
+ """
+ To show information about a load balancer instance, send a GET request to
+ `/v2/load_balancers/$LOAD_BALANCER_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not lb_id:
+ raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+ return await self._get(
+ f"/v2/load_balancers/{lb_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=LoadBalancerRetrieveResponse,
+ )
+
+ @overload
+ async def update(
+ self,
+ lb_id: str,
+ *,
+ forwarding_rules: Iterable[ForwardingRuleParam],
+ algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+ disable_lets_encrypt_dns_records: bool | Omit = omit,
+ domains: Iterable[DomainsParam] | Omit = omit,
+ droplet_ids: Iterable[int] | Omit = omit,
+ enable_backend_keepalive: bool | Omit = omit,
+ enable_proxy_protocol: bool | Omit = omit,
+ firewall: LbFirewallParam | Omit = omit,
+ glb_settings: GlbSettingsParam | Omit = omit,
+ health_check: HealthCheckParam | Omit = omit,
+ http_idle_timeout_seconds: int | Omit = omit,
+ name: str | Omit = omit,
+ network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+ network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+ project_id: str | Omit = omit,
+ redirect_http_to_https: bool | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+ size_unit: int | Omit = omit,
+ sticky_sessions: StickySessionsParam | Omit = omit,
+ target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+ vpc_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> LoadBalancerUpdateResponse:
+ """
+ To update a load balancer's settings, send a PUT request to
+ `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+ representation of the load balancer including existing attributes. It may
+ contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually
+ exclusive. **Note that any attribute that is not provided will be reset to its
+ default value.**
+
+ Args:
+ forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+ algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+ balancers.
+
+ disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+ Let's Encrypt certificates that are added to the load balancer.
+
+ domains: An array of objects specifying the domain configurations for a Global load
+ balancer.
+
+ droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer.
+
+ enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+ target Droplets.
+
+ enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+ firewall: An object specifying allow and deny rules to control traffic to the load
+ balancer.
+
+ glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+ health_check: An object specifying health check settings for the load balancer.
+
+ http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+ target droplets.
+
+ name: A human-readable name for a load balancer instance.
+
+ network: A string indicating whether the load balancer should be external or internal.
+ Internal load balancers have no public IPs and are only accessible to resources
+ on the same VPC network. This property cannot be updated after creating the load
+ balancer.
+
+ network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+ IPv6 networking. This property cannot be updated after creating the load
+ balancer.
+
+ project_id: The ID of the project that the load balancer is associated with. If no ID is
+ provided at creation, the load balancer associates with the user's default
+ project. If an invalid project ID is provided, the load balancer will not be
+ created.
+
+ redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+ will be redirected to HTTPS on port 443.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ size: This field has been replaced by the `size_unit` field for all regions except in
+ AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+ balancer having a set number of nodes.
+
+ - `lb-small` = 1 node
+ - `lb-medium` = 3 nodes
+ - `lb-large` = 6 nodes
+
+ You can resize load balancers after creation up to once per hour. You cannot
+ resize a load balancer within the first hour of its creation.
+
+ size_unit: How many nodes the load balancer contains. Each additional node increases the
+ load balancer's ability to manage more connections. Load balancers can be scaled
+ up or down, and you can change the number of nodes after creation up to once per
+ hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+ Use the `size` field to scale load balancers that reside in these regions.
+
+ sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+ target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+ target backends for a Global load balancer.
+
+ tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+ balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+ `DEFAULT`.
+
+ type: A string indicating whether the load balancer should be a standard regional HTTP
+ load balancer, a regional network load balancer that routes traffic at the
+ TCP/UDP transport layer, or a global load balancer.
+
+ vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+    # Overload: identify target Droplets by `tag` (mutually exclusive with the
+    # `droplet_ids` variant in the other overload — see the docstring below).
+    @overload
+    async def update(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        tag: str | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerUpdateResponse:
+        """
+        To update a load balancer's settings, send a PUT request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+        representation of the load balancer including existing attributes. It may
+        contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually
+        exclusive. **Note that any attribute that is not provided will be reset to its
+        default value.**
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+
+          domains: An array of objects specifying the domain configurations for a Global load
+              balancer.
+
+          enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to
+              target Droplets.
+
+          enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use.
+
+          firewall: An object specifying allow and deny rules to control traffic to the load
+              balancer.
+
+          glb_settings: An object specifying forwarding configurations for a Global load balancer.
+
+          health_check: An object specifying health check settings for the load balancer.
+
+          http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the
+              target droplets.
+
+          name: A human-readable name for a load balancer instance.
+
+          network: A string indicating whether the load balancer should be external or internal.
+              Internal load balancers have no public IPs and are only accessible to resources
+              on the same VPC network. This property cannot be updated after creating the load
+              balancer.
+
+          network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and
+              IPv6 networking. This property cannot be updated after creating the load
+              balancer.
+
+          project_id: The ID of the project that the load balancer is associated with. If no ID is
+              provided at creation, the load balancer associates with the user's default
+              project. If an invalid project ID is provided, the load balancer will not be
+              created.
+
+          redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80
+              will be redirected to HTTPS on port 443.
+
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          size: This field has been replaced by the `size_unit` field for all regions except in
+              AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+              balancer having a set number of nodes.
+
+              - `lb-small` = 1 node
+              - `lb-medium` = 3 nodes
+              - `lb-large` = 6 nodes
+
+              You can resize load balancers after creation up to once per hour. You cannot
+              resize a load balancer within the first hour of its creation.
+
+          size_unit: How many nodes the load balancer contains. Each additional node increases the
+              load balancer's ability to manage more connections. Load balancers can be scaled
+              up or down, and you can change the number of nodes after creation up to once per
+              hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions.
+              Use the `size` field to scale load balancers that reside in these regions.
+
+          sticky_sessions: An object specifying sticky sessions settings for the load balancer.
+
+          tag: The name of a Droplet tag corresponding to Droplets assigned to the load
+              balancer.
+
+          target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as
+              target backends for a Global load balancer.
+
+          tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load
+              balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    # Concrete implementation backing the `update` overloads above; its docstring
+    # lives on the overloads. `@required_args` enforces at runtime that
+    # `forwarding_rules` was supplied regardless of which overload shape is used.
+    @required_args(["forwarding_rules"])
+    async def update(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | Omit = omit,
+        disable_lets_encrypt_dns_records: bool | Omit = omit,
+        domains: Iterable[DomainsParam] | Omit = omit,
+        droplet_ids: Iterable[int] | Omit = omit,
+        enable_backend_keepalive: bool | Omit = omit,
+        enable_proxy_protocol: bool | Omit = omit,
+        firewall: LbFirewallParam | Omit = omit,
+        glb_settings: GlbSettingsParam | Omit = omit,
+        health_check: HealthCheckParam | Omit = omit,
+        http_idle_timeout_seconds: int | Omit = omit,
+        name: str | Omit = omit,
+        network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit,
+        network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit,
+        project_id: str | Omit = omit,
+        redirect_http_to_https: bool | Omit = omit,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | Omit = omit,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit,
+        size_unit: int | Omit = omit,
+        sticky_sessions: StickySessionsParam | Omit = omit,
+        target_load_balancer_ids: SequenceNotStr[str] | Omit = omit,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        tag: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerUpdateResponse:
+        # Guard against an empty path parameter, which would otherwise produce a
+        # malformed request URL (e.g. a PUT against the collection endpoint).
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        # Use the relative path when the client's base URL was overridden;
+        # otherwise target the default public DigitalOcean API host directly.
+        return await self._put(
+            f"/v2/load_balancers/{lb_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+            body=await async_maybe_transform(
+                {
+                    "forwarding_rules": forwarding_rules,
+                    "algorithm": algorithm,
+                    "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records,
+                    "domains": domains,
+                    "droplet_ids": droplet_ids,
+                    "enable_backend_keepalive": enable_backend_keepalive,
+                    "enable_proxy_protocol": enable_proxy_protocol,
+                    "firewall": firewall,
+                    "glb_settings": glb_settings,
+                    "health_check": health_check,
+                    "http_idle_timeout_seconds": http_idle_timeout_seconds,
+                    "name": name,
+                    "network": network,
+                    "network_stack": network_stack,
+                    "project_id": project_id,
+                    "redirect_http_to_https": redirect_http_to_https,
+                    "region": region,
+                    "size": size,
+                    "size_unit": size_unit,
+                    "sticky_sessions": sticky_sessions,
+                    "target_load_balancer_ids": target_load_balancer_ids,
+                    "tls_cipher_policy": tls_cipher_policy,
+                    "type": type,
+                    "vpc_uuid": vpc_uuid,
+                    "tag": tag,
+                },
+                load_balancer_update_params.LoadBalancerUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=LoadBalancerUpdateResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> LoadBalancerListResponse:
+        """
+        To list all of the load balancer instances on your account, send a GET request
+        to `/v2/load_balancers`.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Use the relative path when the client's base URL was overridden;
+        # otherwise target the default public DigitalOcean API host directly.
+        return await self._get(
+            "/v2/load_balancers"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/load_balancers",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                # Pagination is expressed as query parameters; omitted values are
+                # dropped by the transform rather than sent as nulls.
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    load_balancer_list_params.LoadBalancerListParams,
+                ),
+            ),
+            cast_to=LoadBalancerListResponse,
+        )
+
+    async def delete(
+        self,
+        lb_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To delete a load balancer instance, disassociating any Droplets assigned to it
+        and removing it from your account, send a DELETE request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`.
+
+        A successful request will receive a 204 status code with no body in response.
+        This indicates that the request was processed successfully.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Guard against an empty path parameter, which would otherwise produce a
+        # malformed request URL.
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        # The endpoint returns 204 No Content, so accept any content type while
+        # still honoring caller-supplied headers.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/v2/load_balancers/{lb_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def delete_cache(
+        self,
+        lb_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To delete a Global load balancer CDN cache, send a DELETE request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID/cache`.
+
+        A successful request will receive a 204 status code with no body in response.
+        This indicates that the request was processed successfully.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Guard against an empty path parameter, which would otherwise produce a
+        # malformed request URL.
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        # The endpoint returns 204 No Content, so accept any content type while
+        # still honoring caller-supplied headers.
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/v2/load_balancers/{lb_id}/cache"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class LoadBalancersResourceWithRawResponse:
+    """Wraps each ``LoadBalancersResource`` method so calls return the raw
+    response object instead of the parsed content."""
+
+    def __init__(self, load_balancers: LoadBalancersResource) -> None:
+        self._load_balancers = load_balancers
+
+        self.create = to_raw_response_wrapper(
+            load_balancers.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            load_balancers.retrieve,
+        )
+        self.update = to_raw_response_wrapper(
+            load_balancers.update,
+        )
+        self.list = to_raw_response_wrapper(
+            load_balancers.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            load_balancers.delete,
+        )
+        self.delete_cache = to_raw_response_wrapper(
+            load_balancers.delete_cache,
+        )
+
+    @cached_property
+    def droplets(self) -> DropletsResourceWithRawResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return DropletsResourceWithRawResponse(self._load_balancers.droplets)
+
+    @cached_property
+    def forwarding_rules(self) -> ForwardingRulesResourceWithRawResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return ForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules)
+
+
+class AsyncLoadBalancersResourceWithRawResponse:
+    """Async counterpart of ``LoadBalancersResourceWithRawResponse``: wraps each
+    ``AsyncLoadBalancersResource`` method to return the raw response object."""
+
+    def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None:
+        self._load_balancers = load_balancers
+
+        self.create = async_to_raw_response_wrapper(
+            load_balancers.create,
+        )
+        self.retrieve = async_to_raw_response_wrapper(
+            load_balancers.retrieve,
+        )
+        self.update = async_to_raw_response_wrapper(
+            load_balancers.update,
+        )
+        self.list = async_to_raw_response_wrapper(
+            load_balancers.list,
+        )
+        self.delete = async_to_raw_response_wrapper(
+            load_balancers.delete,
+        )
+        self.delete_cache = async_to_raw_response_wrapper(
+            load_balancers.delete_cache,
+        )
+
+    @cached_property
+    def droplets(self) -> AsyncDropletsResourceWithRawResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return AsyncDropletsResourceWithRawResponse(self._load_balancers.droplets)
+
+    @cached_property
+    def forwarding_rules(self) -> AsyncForwardingRulesResourceWithRawResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return AsyncForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules)
+
+
+class LoadBalancersResourceWithStreamingResponse:
+    """Wraps each ``LoadBalancersResource`` method so calls return a streamed
+    response that does not eagerly read the body."""
+
+    def __init__(self, load_balancers: LoadBalancersResource) -> None:
+        self._load_balancers = load_balancers
+
+        self.create = to_streamed_response_wrapper(
+            load_balancers.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            load_balancers.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            load_balancers.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            load_balancers.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            load_balancers.delete,
+        )
+        self.delete_cache = to_streamed_response_wrapper(
+            load_balancers.delete_cache,
+        )
+
+    @cached_property
+    def droplets(self) -> DropletsResourceWithStreamingResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return DropletsResourceWithStreamingResponse(self._load_balancers.droplets)
+
+    @cached_property
+    def forwarding_rules(self) -> ForwardingRulesResourceWithStreamingResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return ForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules)
+
+
+class AsyncLoadBalancersResourceWithStreamingResponse:
+    """Async counterpart of ``LoadBalancersResourceWithStreamingResponse``: wraps
+    each ``AsyncLoadBalancersResource`` method to return a streamed response."""
+
+    def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None:
+        self._load_balancers = load_balancers
+
+        self.create = async_to_streamed_response_wrapper(
+            load_balancers.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            load_balancers.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            load_balancers.update,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            load_balancers.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            load_balancers.delete,
+        )
+        self.delete_cache = async_to_streamed_response_wrapper(
+            load_balancers.delete_cache,
+        )
+
+    @cached_property
+    def droplets(self) -> AsyncDropletsResourceWithStreamingResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return AsyncDropletsResourceWithStreamingResponse(self._load_balancers.droplets)
+
+    @cached_property
+    def forwarding_rules(self) -> AsyncForwardingRulesResourceWithStreamingResponse:
+        """
+        [DigitalOcean Load Balancers](https://docs.digitalocean.com/products/networking/load-balancers/)
+        provide a way to distribute traffic across multiple Droplets. By sending
+        requests to the `/v2/load_balancers` endpoint, you can list, create, or
+        delete load balancers as well as add or remove Droplets, forwarding rules,
+        and other configuration details.
+        """
+        return AsyncForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules)
diff --git a/src/gradient/resources/gpu_droplets/sizes.py b/src/gradient/resources/gpu_droplets/sizes.py
new file mode 100644
index 00000000..83d77052
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/sizes.py
@@ -0,0 +1,221 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.gpu_droplets import size_list_params
+from ...types.gpu_droplets.size_list_response import SizeListResponse
+
+__all__ = ["SizesResource", "AsyncSizesResource"]
+
+
+class SizesResource(SyncAPIResource):
+    """
+    The sizes objects represent different packages of hardware resources that
+    can be used for Droplets. When a Droplet is created, a size must be
+    selected so that the correct resources can be allocated.
+
+    Each size represents a plan that bundles together specific sets of
+    resources. This includes the amount of RAM, the number of virtual CPUs,
+    disk space, and transfer. The size object also includes the pricing
+    details and the regions that the size is available in.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> SizesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return SizesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> SizesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return SizesResourceWithStreamingResponse(self)
+
+    def list(
+        self,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> SizeListResponse:
+        """To list all of available Droplet sizes, send a GET request to `/v2/sizes`.
+
+        The
+        response will be a JSON object with a key called `sizes`. The value of this will
+        be an array of `size` objects each of which contain the standard size
+        attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Use the relative path when the client's base URL was overridden;
+        # otherwise target the default public DigitalOcean API host directly.
+        return self._get(
+            "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    size_list_params.SizeListParams,
+                ),
+            ),
+            cast_to=SizeListResponse,
+        )
+
+
+class AsyncSizesResource(AsyncAPIResource):
+    """
+    The sizes objects represent different packages of hardware resources that
+    can be used for Droplets. When a Droplet is created, a size must be
+    selected so that the correct resources can be allocated.
+
+    Each size represents a plan that bundles together specific sets of
+    resources. This includes the amount of RAM, the number of virtual CPUs,
+    disk space, and transfer. The size object also includes the pricing
+    details and the regions that the size is available in.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> AsyncSizesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncSizesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncSizesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncSizesResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> SizeListResponse:
+        """To list all of available Droplet sizes, send a GET request to `/v2/sizes`.
+
+        The
+        response will be a JSON object with a key called `sizes`. The value of this will
+        be an array of `size` objects each of which contain the standard size
+        attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Use the relative path when the client's base URL was overridden;
+        # otherwise target the default public DigitalOcean API host directly.
+        return await self._get(
+            "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    size_list_params.SizeListParams,
+                ),
+            ),
+            cast_to=SizeListResponse,
+        )
+
+
+class SizesResourceWithRawResponse:
+    """Wraps ``SizesResource.list`` so calls return the raw response object
+    instead of the parsed content."""
+
+    def __init__(self, sizes: SizesResource) -> None:
+        self._sizes = sizes
+
+        self.list = to_raw_response_wrapper(
+            sizes.list,
+        )
+
+
+class AsyncSizesResourceWithRawResponse:
+    """Wraps ``AsyncSizesResource.list`` so calls return the raw response object
+    instead of the parsed content."""
+
+    def __init__(self, sizes: AsyncSizesResource) -> None:
+        self._sizes = sizes
+
+        self.list = async_to_raw_response_wrapper(
+            sizes.list,
+        )
+
+
+class SizesResourceWithStreamingResponse:
+    """Wraps ``SizesResource.list`` so calls return a streamed response that does
+    not eagerly read the body."""
+
+    def __init__(self, sizes: SizesResource) -> None:
+        self._sizes = sizes
+
+        self.list = to_streamed_response_wrapper(
+            sizes.list,
+        )
+
+
+class AsyncSizesResourceWithStreamingResponse:
+    """Wraps ``AsyncSizesResource.list`` so calls return a streamed response that
+    does not eagerly read the body."""
+
+    def __init__(self, sizes: AsyncSizesResource) -> None:
+        self._sizes = sizes
+
+        self.list = async_to_streamed_response_wrapper(
+            sizes.list,
+        )
diff --git a/src/gradient/resources/gpu_droplets/snapshots.py b/src/gradient/resources/gpu_droplets/snapshots.py
new file mode 100644
index 00000000..12cf20da
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/snapshots.py
@@ -0,0 +1,449 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.gpu_droplets import snapshot_list_params
+from ...types.gpu_droplets.snapshot_list_response import SnapshotListResponse
+from ...types.gpu_droplets.snapshot_retrieve_response import SnapshotRetrieveResponse
+
+__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"]
+
+
class SnapshotsResource(SyncAPIResource):
    """
    [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
    instances of a Droplet or a block storage volume, which is reflected in
    the `resource_type` attribute. In order to avoid problems with compressing
    filesystems, each defines a `min_disk_size` attribute which is the minimum
    size of the Droplet or volume disk when creating a new resource from the
    saved snapshot.

    To interact with snapshots, you will generally send requests to the
    snapshots endpoint at `/v2/snapshots`.
    """

    @cached_property
    def with_raw_response(self) -> SnapshotsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
        """
        return SnapshotsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
        """
        return SnapshotsResourceWithStreamingResponse(self)

    def retrieve(
        self,
        snapshot_id: Union[int, str],
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SnapshotRetrieveResponse:
        """
        To retrieve information about a snapshot, send a GET request to
        `/v2/snapshots/$SNAPSHOT_ID`.

        The response will be a JSON object with a key called `snapshot`. The value of
        this will be a snapshot object containing the standard snapshot attributes.

        Args:
          snapshot_id: The ID of a Droplet snapshot.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            # Use the relative path when the client has a custom base URL;
            # otherwise target the DigitalOcean API host directly.
            f"/v2/snapshots/{snapshot_id}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SnapshotRetrieveResponse,
        )

    def list(
        self,
        *,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        resource_type: Literal["droplet", "volume"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SnapshotListResponse:
        """
        To list all of the snapshots available on your account, send a GET request to
        `/v2/snapshots`.

        The response will be a JSON object with a key called `snapshots`. This will be
        set to an array of `snapshot` objects, each of which will contain the standard
        snapshot attributes.

        ### Filtering Results by Resource Type

        It's possible to request filtered results by including certain query parameters.

        #### List Droplet Snapshots

        To retrieve only snapshots based on Droplets, include the `resource_type` query
        parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`.

        #### List Volume Snapshots

        To retrieve only snapshots based on volumes, include the `resource_type` query
        parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`.

        Args:
          page: Which 'page' of paginated results to return.

          per_page: Number of items returned per page

          resource_type: Used to filter snapshots by a resource type.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Omitted params are dropped by the transform, so only
                # explicitly-passed filters appear in the query string.
                query=maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                        "resource_type": resource_type,
                    },
                    snapshot_list_params.SnapshotListParams,
                ),
            ),
            cast_to=SnapshotListResponse,
        )

    def delete(
        self,
        snapshot_id: Union[int, str],
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Both Droplet and volume snapshots are managed through the `/v2/snapshots/`
        endpoint. To delete a snapshot, send a DELETE request to
        `/v2/snapshots/$SNAPSHOT_ID`.

        A status of 204 will be given. This indicates that the request was processed
        successfully, but that no response body is needed.

        Args:
          snapshot_id: The ID of a Droplet snapshot.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The endpoint returns 204 No Content, so accept any content type.
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._delete(
            # Use the relative path when the client has a custom base URL;
            # otherwise target the DigitalOcean API host directly.
            f"/v2/snapshots/{snapshot_id}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
+
+
class AsyncSnapshotsResource(AsyncAPIResource):
    """
    [Snapshots](https://docs.digitalocean.com/products/snapshots/) are saved
    instances of a Droplet or a block storage volume, which is reflected in
    the `resource_type` attribute. In order to avoid problems with compressing
    filesystems, each defines a `min_disk_size` attribute which is the minimum
    size of the Droplet or volume disk when creating a new resource from the
    saved snapshot.

    To interact with snapshots, you will generally send requests to the
    snapshots endpoint at `/v2/snapshots`.
    """

    @cached_property
    def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSnapshotsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
        """
        return AsyncSnapshotsResourceWithStreamingResponse(self)

    async def retrieve(
        self,
        snapshot_id: Union[int, str],
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SnapshotRetrieveResponse:
        """
        To retrieve information about a snapshot, send a GET request to
        `/v2/snapshots/$SNAPSHOT_ID`.

        The response will be a JSON object with a key called `snapshot`. The value of
        this will be a snapshot object containing the standard snapshot attributes.

        Args:
          snapshot_id: The ID of a Droplet snapshot.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._get(
            # Use the relative path when the client has a custom base URL;
            # otherwise target the DigitalOcean API host directly.
            f"/v2/snapshots/{snapshot_id}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SnapshotRetrieveResponse,
        )

    async def list(
        self,
        *,
        page: int | Omit = omit,
        per_page: int | Omit = omit,
        resource_type: Literal["droplet", "volume"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SnapshotListResponse:
        """
        To list all of the snapshots available on your account, send a GET request to
        `/v2/snapshots`.

        The response will be a JSON object with a key called `snapshots`. This will be
        set to an array of `snapshot` objects, each of which will contain the standard
        snapshot attributes.

        ### Filtering Results by Resource Type

        It's possible to request filtered results by including certain query parameters.

        #### List Droplet Snapshots

        To retrieve only snapshots based on Droplets, include the `resource_type` query
        parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`.

        #### List Volume Snapshots

        To retrieve only snapshots based on volumes, include the `resource_type` query
        parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`.

        Args:
          page: Which 'page' of paginated results to return.

          per_page: Number of items returned per page

          resource_type: Used to filter snapshots by a resource type.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._get(
            "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Omitted params are dropped by the transform, so only
                # explicitly-passed filters appear in the query string.
                query=await async_maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                        "resource_type": resource_type,
                    },
                    snapshot_list_params.SnapshotListParams,
                ),
            ),
            cast_to=SnapshotListResponse,
        )

    async def delete(
        self,
        snapshot_id: Union[int, str],
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Both Droplet and volume snapshots are managed through the `/v2/snapshots/`
        endpoint. To delete a snapshot, send a DELETE request to
        `/v2/snapshots/$SNAPSHOT_ID`.

        A status of 204 will be given. This indicates that the request was processed
        successfully, but that no response body is needed.

        Args:
          snapshot_id: The ID of a Droplet snapshot.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The endpoint returns 204 No Content, so accept any content type.
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._delete(
            # Use the relative path when the client has a custom base URL;
            # otherwise target the DigitalOcean API host directly.
            f"/v2/snapshots/{snapshot_id}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
+
+
class SnapshotsResourceWithRawResponse:
    """Variant of ``SnapshotsResource`` whose methods return the raw response.

    Each public method is wrapped with ``to_raw_response_wrapper`` so callers
    get the unparsed response object instead of the parsed model.
    """

    def __init__(self, snapshots: SnapshotsResource) -> None:
        self._snapshots = snapshots

        self.retrieve = to_raw_response_wrapper(
            snapshots.retrieve,
        )
        self.list = to_raw_response_wrapper(
            snapshots.list,
        )
        self.delete = to_raw_response_wrapper(
            snapshots.delete,
        )
+
+
class AsyncSnapshotsResourceWithRawResponse:
    """Async variant of ``SnapshotsResource`` whose methods return the raw response.

    Each public method is wrapped with ``async_to_raw_response_wrapper`` so
    callers get the unparsed response object instead of the parsed model.
    """

    def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
        self._snapshots = snapshots

        self.retrieve = async_to_raw_response_wrapper(
            snapshots.retrieve,
        )
        self.list = async_to_raw_response_wrapper(
            snapshots.list,
        )
        self.delete = async_to_raw_response_wrapper(
            snapshots.delete,
        )
+
+
class SnapshotsResourceWithStreamingResponse:
    """Variant of ``SnapshotsResource`` that streams response bodies.

    Each public method is wrapped with ``to_streamed_response_wrapper`` so the
    response body is not eagerly read.
    """

    def __init__(self, snapshots: SnapshotsResource) -> None:
        self._snapshots = snapshots

        self.retrieve = to_streamed_response_wrapper(
            snapshots.retrieve,
        )
        self.list = to_streamed_response_wrapper(
            snapshots.list,
        )
        self.delete = to_streamed_response_wrapper(
            snapshots.delete,
        )
+
+
class AsyncSnapshotsResourceWithStreamingResponse:
    """Async variant of ``SnapshotsResource`` that streams response bodies.

    Each public method is wrapped with ``async_to_streamed_response_wrapper``
    so the response body is not eagerly read.
    """

    def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
        self._snapshots = snapshots

        self.retrieve = async_to_streamed_response_wrapper(
            snapshots.retrieve,
        )
        self.list = async_to_streamed_response_wrapper(
            snapshots.list,
        )
        self.delete = async_to_streamed_response_wrapper(
            snapshots.delete,
        )
diff --git a/src/gradient/resources/gpu_droplets/volumes/__init__.py b/src/gradient/resources/gpu_droplets/volumes/__init__.py
new file mode 100644
index 00000000..167db0b3
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/volumes/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+from .volumes import (
+ VolumesResource,
+ AsyncVolumesResource,
+ VolumesResourceWithRawResponse,
+ AsyncVolumesResourceWithRawResponse,
+ VolumesResourceWithStreamingResponse,
+ AsyncVolumesResourceWithStreamingResponse,
+)
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+
# Public re-exports for the `gradient.resources.gpu_droplets.volumes` subpackage:
# each resource class plus its raw/streaming response wrappers, in both sync
# and async flavors.
__all__ = [
    "ActionsResource",
    "AsyncActionsResource",
    "ActionsResourceWithRawResponse",
    "AsyncActionsResourceWithRawResponse",
    "ActionsResourceWithStreamingResponse",
    "AsyncActionsResourceWithStreamingResponse",
    "SnapshotsResource",
    "AsyncSnapshotsResource",
    "SnapshotsResourceWithRawResponse",
    "AsyncSnapshotsResourceWithRawResponse",
    "SnapshotsResourceWithStreamingResponse",
    "AsyncSnapshotsResourceWithStreamingResponse",
    "VolumesResource",
    "AsyncVolumesResource",
    "VolumesResourceWithRawResponse",
    "AsyncVolumesResourceWithRawResponse",
    "VolumesResourceWithStreamingResponse",
    "AsyncVolumesResourceWithStreamingResponse",
]
diff --git a/src/gradient/resources/gpu_droplets/volumes/actions.py b/src/gradient/resources/gpu_droplets/volumes/actions.py
new file mode 100644
index 00000000..9b145567
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/volumes/actions.py
@@ -0,0 +1,1574 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.volumes import (
+ action_list_params,
+ action_retrieve_params,
+ action_initiate_by_id_params,
+ action_initiate_by_name_params,
+)
+from ....types.gpu_droplets.volumes.action_list_response import ActionListResponse
+from ....types.gpu_droplets.volumes.action_retrieve_response import ActionRetrieveResponse
+from ....types.gpu_droplets.volumes.action_initiate_by_id_response import ActionInitiateByIDResponse
+from ....types.gpu_droplets.volumes.action_initiate_by_name_response import ActionInitiateByNameResponse
+
+__all__ = ["ActionsResource", "AsyncActionsResource"]
+
+
+class ActionsResource(SyncAPIResource):
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> ActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ActionsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ action_id: int,
+ *,
+ volume_id: str,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionRetrieveResponse:
+ """
+ To retrieve the status of a volume action, send a GET request to
+ `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return self._get(
+ f"/v2/volumes/{volume_id}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_retrieve_params.ActionRetrieveParams,
+ ),
+ ),
+ cast_to=ActionRetrieveResponse,
+ )
+
+ def list(
+ self,
+ volume_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve all actions that have been executed on a volume, send a GET request
+ to `/v2/volumes/$VOLUME_ID/actions`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return self._get(
+ f"/v2/volumes/{volume_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_list_params.ActionListParams,
+ ),
+ ),
+ cast_to=ActionListResponse,
+ )
+
+ @overload
+ def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ """
+ To initiate an action on a block storage volume by Id, send a POST request to
+ `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate
+ attributes for the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ ## Resize a Volume
+
+ | Attribute | Details |
+ | -------------- | ------------------------------------------------------------------- |
+ | type | This must be `resize` |
+ | size_gigabytes | The new size of the block storage volume in GiB (1024^3) |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Volumes may only be resized upwards. The maximum size for a volume is 16TiB.
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ """
+ To initiate an action on a block storage volume by Id, send a POST request to
+ `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate
+ attributes for the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ ## Resize a Volume
+
+ | Attribute | Details |
+ | -------------- | ------------------------------------------------------------------- |
+ | type | This must be `resize` |
+ | size_gigabytes | The new size of the block storage volume in GiB (1024^3) |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Volumes may only be resized upwards. The maximum size for a volume is 16TiB.
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ size_gigabytes: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ """
+ To initiate an action on a block storage volume by Id, send a POST request to
+ `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate
+ attributes for the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ ## Resize a Volume
+
+ | Attribute | Details |
+ | -------------- | ------------------------------------------------------------------- |
+ | type | This must be `resize` |
+ | size_gigabytes | The new size of the block storage volume in GiB (1024^3) |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Volumes may only be resized upwards. The maximum size for a volume is 16TiB.
+
+ Args:
+ size_gigabytes: The new size of the block storage volume in GiB (1024^3).
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["droplet_id", "type"], ["size_gigabytes", "type"])
+ def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ droplet_id: int | Omit = omit,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ size_gigabytes: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return self._post(
+ f"/v2/volumes/{volume_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions",
+ body=maybe_transform(
+ {
+ "droplet_id": droplet_id,
+ "type": type,
+ "region": region,
+ "tags": tags,
+ "size_gigabytes": size_gigabytes,
+ },
+ action_initiate_by_id_params.ActionInitiateByIDParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_initiate_by_id_params.ActionInitiateByIDParams,
+ ),
+ ),
+ cast_to=ActionInitiateByIDResponse,
+ )
+
+ @overload
+ def initiate_by_name(
+ self,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByNameResponse:
+ """
+ To initiate an action on a block storage volume by Name, send a POST request to
+ `~/v2/volumes/actions`. The body should contain the appropriate attributes for
+ the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate_by_name(
+ self,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByNameResponse:
+ """
+ To initiate an action on a block storage volume by Name, send a POST request to
+ `~/v2/volumes/actions`. The body should contain the appropriate attributes for
+ the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["droplet_id", "type"])
+ def initiate_by_name(
+ self,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByNameResponse:
+ return self._post(
+ "/v2/volumes/actions"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/volumes/actions",
+ body=maybe_transform(
+ {
+ "droplet_id": droplet_id,
+ "type": type,
+ "region": region,
+ "tags": tags,
+ },
+ action_initiate_by_name_params.ActionInitiateByNameParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_initiate_by_name_params.ActionInitiateByNameParams,
+ ),
+ ),
+ cast_to=ActionInitiateByNameResponse,
+ )
+
+
+class AsyncActionsResource(AsyncAPIResource):
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncActionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncActionsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ action_id: int,
+ *,
+ volume_id: str,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionRetrieveResponse:
+ """
+ To retrieve the status of a volume action, send a GET request to
+ `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return await self._get(
+ f"/v2/volumes/{volume_id}/actions/{action_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_retrieve_params.ActionRetrieveParams,
+ ),
+ ),
+ cast_to=ActionRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ volume_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionListResponse:
+ """
+ To retrieve all actions that have been executed on a volume, send a GET request
+ to `/v2/volumes/$VOLUME_ID/actions`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return await self._get(
+ f"/v2/volumes/{volume_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_list_params.ActionListParams,
+ ),
+ ),
+ cast_to=ActionListResponse,
+ )
+
+ @overload
+ async def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ """
+ To initiate an action on a block storage volume by Id, send a POST request to
+ `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate
+ attributes for the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ ## Resize a Volume
+
+ | Attribute | Details |
+ | -------------- | ------------------------------------------------------------------- |
+ | type | This must be `resize` |
+ | size_gigabytes | The new size of the block storage volume in GiB (1024^3) |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Volumes may only be resized upwards. The maximum size for a volume is 16TiB.
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ """
+ To initiate an action on a block storage volume by Id, send a POST request to
+ `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate
+ attributes for the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ ## Resize a Volume
+
+ | Attribute | Details |
+ | -------------- | ------------------------------------------------------------------- |
+ | type | This must be `resize` |
+ | size_gigabytes | The new size of the block storage volume in GiB (1024^3) |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Volumes may only be resized upwards. The maximum size for a volume is 16TiB.
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ size_gigabytes: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ """
+ To initiate an action on a block storage volume by Id, send a POST request to
+ `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate
+ attributes for the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ---------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ ## Resize a Volume
+
+ | Attribute | Details |
+ | -------------- | ------------------------------------------------------------------- |
+ | type | This must be `resize` |
+ | size_gigabytes | The new size of the block storage volume in GiB (1024^3) |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Volumes may only be resized upwards. The maximum size for a volume is 16TiB.
+
+ Args:
+ size_gigabytes: The new size of the block storage volume in GiB (1024^3).
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["droplet_id", "type"], ["size_gigabytes", "type"])
+ async def initiate_by_id(
+ self,
+ volume_id: str,
+ *,
+ droplet_id: int | Omit = omit,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ size_gigabytes: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByIDResponse:
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return await self._post(
+ f"/v2/volumes/{volume_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions",
+ body=await async_maybe_transform(
+ {
+ "droplet_id": droplet_id,
+ "type": type,
+ "region": region,
+ "tags": tags,
+ "size_gigabytes": size_gigabytes,
+ },
+ action_initiate_by_id_params.ActionInitiateByIDParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_initiate_by_id_params.ActionInitiateByIDParams,
+ ),
+ ),
+ cast_to=ActionInitiateByIDResponse,
+ )
+
+ @overload
+ async def initiate_by_name(
+ self,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByNameResponse:
+ """
+ To initiate an action on a block storage volume by Name, send a POST request to
+ `~/v2/volumes/actions`. The body should contain the appropriate attributes for
+ the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_by_name(
+ self,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByNameResponse:
+ """
+ To initiate an action on a block storage volume by Name, send a POST request to
+ `~/v2/volumes/actions`. The body should contain the appropriate attributes for
+ the respective action.
+
+ ## Attach a Block Storage Volume to a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `attach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Each volume may only be attached to a single Droplet. However, up to fifteen
+ volumes may be attached to a Droplet at a time. Pre-formatted volumes will be
+ automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS
+ Droplets created on or after April 26, 2018 when attached. On older Droplets,
+ [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/)
+ is required.
+
+ ## Remove a Block Storage Volume from a Droplet
+
+ | Attribute | Details |
+ | ----------- | ------------------------------------------------------------------- |
+ | type | This must be `detach` |
+ | volume_name | The name of the block storage volume |
+ | droplet_id | Set to the Droplet's ID |
+ | region | Set to the slug representing the region where the volume is located |
+
+ Args:
+ droplet_id: The unique identifier for the Droplet the volume will be attached or detached
+ from.
+
+ type: The volume action to initiate.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["droplet_id", "type"])
+ async def initiate_by_name(
+ self,
+ *,
+ droplet_id: int,
+ type: Literal["attach", "detach", "resize"],
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ActionInitiateByNameResponse:
+ return await self._post(
+ "/v2/volumes/actions"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/volumes/actions",
+ body=await async_maybe_transform(
+ {
+ "droplet_id": droplet_id,
+ "type": type,
+ "region": region,
+ "tags": tags,
+ },
+ action_initiate_by_name_params.ActionInitiateByNameParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ action_initiate_by_name_params.ActionInitiateByNameParams,
+ ),
+ ),
+ cast_to=ActionInitiateByNameResponse,
+ )
+
+
+class ActionsResourceWithRawResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ actions.list,
+ )
+ self.initiate_by_id = to_raw_response_wrapper(
+ actions.initiate_by_id,
+ )
+ self.initiate_by_name = to_raw_response_wrapper(
+ actions.initiate_by_name,
+ )
+
+
+class AsyncActionsResourceWithRawResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = async_to_raw_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ actions.list,
+ )
+ self.initiate_by_id = async_to_raw_response_wrapper(
+ actions.initiate_by_id,
+ )
+ self.initiate_by_name = async_to_raw_response_wrapper(
+ actions.initiate_by_name,
+ )
+
+
+class ActionsResourceWithStreamingResponse:
+ def __init__(self, actions: ActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ actions.list,
+ )
+ self.initiate_by_id = to_streamed_response_wrapper(
+ actions.initiate_by_id,
+ )
+ self.initiate_by_name = to_streamed_response_wrapper(
+ actions.initiate_by_name,
+ )
+
+
+class AsyncActionsResourceWithStreamingResponse:
+ def __init__(self, actions: AsyncActionsResource) -> None:
+ self._actions = actions
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ actions.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ actions.list,
+ )
+ self.initiate_by_id = async_to_streamed_response_wrapper(
+ actions.initiate_by_id,
+ )
+ self.initiate_by_name = async_to_streamed_response_wrapper(
+ actions.initiate_by_name,
+ )
diff --git a/src/gradient/resources/gpu_droplets/volumes/snapshots.py b/src/gradient/resources/gpu_droplets/volumes/snapshots.py
new file mode 100644
index 00000000..8e2d6422
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/volumes/snapshots.py
@@ -0,0 +1,527 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.volumes import snapshot_list_params, snapshot_create_params
+from ....types.gpu_droplets.volumes.snapshot_list_response import SnapshotListResponse
+from ....types.gpu_droplets.volumes.snapshot_create_response import SnapshotCreateResponse
+from ....types.gpu_droplets.volumes.snapshot_retrieve_response import SnapshotRetrieveResponse
+
+__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"]
+
+
+class SnapshotsResource(SyncAPIResource):
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+
+ @cached_property
+ def with_raw_response(self) -> SnapshotsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return SnapshotsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return SnapshotsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ volume_id: str,
+ *,
+ name: str,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotCreateResponse:
+ """
+        To create a snapshot from a volume, send a POST request to
+ `/v2/volumes/$VOLUME_ID/snapshots`.
+
+ Args:
+ name: A human-readable name for the volume snapshot.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return self._post(
+ f"/v2/volumes/{volume_id}/snapshots"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots",
+ body=maybe_transform(
+ {
+ "name": name,
+ "tags": tags,
+ },
+ snapshot_create_params.SnapshotCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SnapshotCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ snapshot_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotRetrieveResponse:
+ """
+ To retrieve the details of a snapshot that has been created from a volume, send
+ a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}")
+ return self._get(
+ f"/v2/volumes/snapshots/{snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SnapshotRetrieveResponse,
+ )
+
+ def list(
+ self,
+ volume_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotListResponse:
+ """
+ To retrieve the snapshots that have been created from a volume, send a GET
+ request to `/v2/volumes/$VOLUME_ID/snapshots`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return self._get(
+ f"/v2/volumes/{volume_id}/snapshots"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ snapshot_list_params.SnapshotListParams,
+ ),
+ ),
+ cast_to=SnapshotListResponse,
+ )
+
+ def delete(
+ self,
+ snapshot_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a volume snapshot, send a DELETE request to
+ `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`.
+
+ A status of 204 will be given. This indicates that the request was processed
+ successfully, but that no response body is needed.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/volumes/snapshots/{snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncSnapshotsResource(AsyncAPIResource):
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncSnapshotsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncSnapshotsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ volume_id: str,
+ *,
+ name: str,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotCreateResponse:
+ """
+        To create a snapshot from a volume, send a POST request to
+ `/v2/volumes/$VOLUME_ID/snapshots`.
+
+ Args:
+ name: A human-readable name for the volume snapshot.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return await self._post(
+ f"/v2/volumes/{volume_id}/snapshots"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "tags": tags,
+ },
+ snapshot_create_params.SnapshotCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SnapshotCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ snapshot_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotRetrieveResponse:
+ """
+ To retrieve the details of a snapshot that has been created from a volume, send
+ a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}")
+ return await self._get(
+ f"/v2/volumes/snapshots/{snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SnapshotRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ volume_id: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotListResponse:
+ """
+ To retrieve the snapshots that have been created from a volume, send a GET
+ request to `/v2/volumes/$VOLUME_ID/snapshots`.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return await self._get(
+ f"/v2/volumes/{volume_id}/snapshots"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ snapshot_list_params.SnapshotListParams,
+ ),
+ ),
+ cast_to=SnapshotListResponse,
+ )
+
+ async def delete(
+ self,
+ snapshot_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a volume snapshot, send a DELETE request to
+ `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`.
+
+ A status of 204 will be given. This indicates that the request was processed
+ successfully, but that no response body is needed.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/volumes/snapshots/{snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class SnapshotsResourceWithRawResponse:
+ def __init__(self, snapshots: SnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.create = to_raw_response_wrapper(
+ snapshots.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class AsyncSnapshotsResourceWithRawResponse:
+ def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.create = async_to_raw_response_wrapper(
+ snapshots.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class SnapshotsResourceWithStreamingResponse:
+ def __init__(self, snapshots: SnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.create = to_streamed_response_wrapper(
+ snapshots.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class AsyncSnapshotsResourceWithStreamingResponse:
+ def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.create = async_to_streamed_response_wrapper(
+ snapshots.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ snapshots.delete,
+ )
diff --git a/src/gradient/resources/gpu_droplets/volumes/volumes.py b/src/gradient/resources/gpu_droplets/volumes/volumes.py
new file mode 100644
index 00000000..76b1db6b
--- /dev/null
+++ b/src/gradient/resources/gpu_droplets/volumes/volumes.py
@@ -0,0 +1,1304 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, overload
+
+import httpx
+
+from .actions import (
+ ActionsResource,
+ AsyncActionsResource,
+ ActionsResourceWithRawResponse,
+ AsyncActionsResourceWithRawResponse,
+ ActionsResourceWithStreamingResponse,
+ AsyncActionsResourceWithStreamingResponse,
+)
+from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets import volume_list_params, volume_create_params, volume_delete_by_name_params
+from ....types.gpu_droplets.volume_list_response import VolumeListResponse
+from ....types.gpu_droplets.volume_create_response import VolumeCreateResponse
+from ....types.gpu_droplets.volume_retrieve_response import VolumeRetrieveResponse
+
+__all__ = ["VolumesResource", "AsyncVolumesResource"]
+
+
+class VolumesResource(SyncAPIResource):
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+
+ @cached_property
+ def actions(self) -> ActionsResource:
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return ActionsResource(self._client)
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResource:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+ return SnapshotsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> VolumesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return VolumesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> VolumesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return VolumesResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ *,
+ name: str,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ size_gigabytes: int,
+ description: str | Omit = omit,
+ filesystem_label: str | Omit = omit,
+ filesystem_type: str | Omit = omit,
+ snapshot_id: str | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeCreateResponse:
+ """To create a new volume, send a POST request to `/v2/volumes`.
+
+ Optionally, a
+ `filesystem_type` attribute may be provided in order to automatically format the
+ volume's filesystem. Pre-formatted volumes are automatically mounted when
+ attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created
+ on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without
+ support for auto-mounting is not recommended.
+
+ Args:
+ name: A human-readable name for the block storage volume. Must be lowercase and be
+ composed only of numbers, letters and "-", up to a limit of 64 characters. The
+ name must begin with a letter.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply
+ when creating a volume from a snapshot.
+
+ description: An optional free-form text field to describe a block storage volume.
+
+ filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may
+ contain 16 characters while labels for xfs type filesystems are limited to 12
+ characters. May only be used in conjunction with filesystem_type.
+
+ filesystem_type: The name of the filesystem type to be used on the volume. When provided, the
+ volume will automatically be formatted to the specified filesystem type.
+ Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are
+ automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic,
+ and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted
+ volumes to other Droplets is not recommended.
+
+ snapshot_id: The unique identifier for the volume snapshot from which to create the volume.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ name: str,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ size_gigabytes: int,
+ description: str | Omit = omit,
+ filesystem_label: str | Omit = omit,
+ filesystem_type: str | Omit = omit,
+ snapshot_id: str | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeCreateResponse:
+ """To create a new volume, send a POST request to `/v2/volumes`.
+
+ Optionally, a
+ `filesystem_type` attribute may be provided in order to automatically format the
+ volume's filesystem. Pre-formatted volumes are automatically mounted when
+ attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created
+ on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without
+ support for auto-mounting is not recommended.
+
+ Args:
+ name: A human-readable name for the block storage volume. Must be lowercase and be
+ composed only of numbers, letters and "-", up to a limit of 64 characters. The
+ name must begin with a letter.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply
+ when creating a volume from a snapshot.
+
+ description: An optional free-form text field to describe a block storage volume.
+
+ filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may
+ contain 16 characters while labels for xfs type filesystems are limited to 12
+ characters. May only be used in conjunction with filesystem_type.
+
+ filesystem_type: The name of the filesystem type to be used on the volume. When provided, the
+ volume will automatically be formatted to the specified filesystem type.
+ Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are
+ automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic,
+ and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted
+ volumes to other Droplets is not recommended.
+
+ snapshot_id: The unique identifier for the volume snapshot from which to create the volume.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["name", "region", "size_gigabytes"])
+ def create(
+ self,
+ *,
+ name: str,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ size_gigabytes: int,
+ description: str | Omit = omit,
+ filesystem_label: str | Omit = omit,
+ filesystem_type: str | Omit = omit,
+ snapshot_id: str | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeCreateResponse:
+ return self._post(
+ "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+ body=maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ "size_gigabytes": size_gigabytes,
+ "description": description,
+ "filesystem_label": filesystem_label,
+ "filesystem_type": filesystem_type,
+ "snapshot_id": snapshot_id,
+ "tags": tags,
+ },
+ volume_create_params.VolumeCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VolumeCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ volume_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeRetrieveResponse:
+ """
+ To show information about a block storage volume, send a GET request to
+ `/v2/volumes/$VOLUME_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return self._get(
+ f"/v2/volumes/{volume_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VolumeRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ name: str | Omit = omit,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeListResponse:
+ """
+ To list all of the block storage volumes available on your account, send a GET
+ request to `/v2/volumes`.
+
+ ## Filtering Results
+
+ ### By Region
+
+ The `region` may be provided as query parameter in order to restrict results to
+ volumes available in a specific region. For example: `/v2/volumes?region=nyc1`
+
+ ### By Name
+
+ It is also possible to list volumes on your account that match a specified name.
+ To do so, send a GET request with the volume's name as a query parameter to
+ `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per
+ region with the same name.
+
+ ### By Name and Region
+
+ It is also possible to retrieve information about a block storage volume by
+ name. To do so, send a GET request with the volume's name and the region slug
+ for the region it is located in as query parameters to
+        `/v2/volumes?name=$VOLUME_NAME&region=nyc1`.
+
+ Args:
+ name: The block storage volume's name.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource is available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "name": name,
+ "page": page,
+ "per_page": per_page,
+ "region": region,
+ },
+ volume_list_params.VolumeListParams,
+ ),
+ ),
+ cast_to=VolumeListResponse,
+ )
+
+ def delete(
+ self,
+ volume_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a block storage volume, destroying all data and removing it from your
+ account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body
+ will be sent back, but the response code will indicate success. Specifically,
+ the response code will be a 204, which means that the action was successful with
+ no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/volumes/{volume_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def delete_by_name(
+ self,
+ *,
+ name: str | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Block storage volumes may also be deleted by name by sending a DELETE request
+ with the volume's **name** and the **region slug** for the region it is located
+        in as query parameters to `/v2/volumes?name=$VOLUME_NAME&region=nyc1`. No
+ response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ name: The block storage volume's name.
+
+ region: The slug identifier for the region where the resource is available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ },
+ volume_delete_by_name_params.VolumeDeleteByNameParams,
+ ),
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncVolumesResource(AsyncAPIResource):
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+
+ @cached_property
+ def actions(self) -> AsyncActionsResource:
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return AsyncActionsResource(self._client)
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResource:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+ return AsyncSnapshotsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncVolumesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncVolumesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncVolumesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncVolumesResourceWithStreamingResponse(self)
+
+ @overload
+ async def create(
+ self,
+ *,
+ name: str,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ size_gigabytes: int,
+ description: str | Omit = omit,
+ filesystem_label: str | Omit = omit,
+ filesystem_type: str | Omit = omit,
+ snapshot_id: str | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeCreateResponse:
+ """To create a new volume, send a POST request to `/v2/volumes`.
+
+ Optionally, a
+ `filesystem_type` attribute may be provided in order to automatically format the
+ volume's filesystem. Pre-formatted volumes are automatically mounted when
+ attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created
+ on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without
+ support for auto-mounting is not recommended.
+
+ Args:
+ name: A human-readable name for the block storage volume. Must be lowercase and be
+ composed only of numbers, letters and "-", up to a limit of 64 characters. The
+ name must begin with a letter.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply
+ when creating a volume from a snapshot.
+
+ description: An optional free-form text field to describe a block storage volume.
+
+ filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may
+ contain 16 characters while labels for xfs type filesystems are limited to 12
+ characters. May only be used in conjunction with filesystem_type.
+
+ filesystem_type: The name of the filesystem type to be used on the volume. When provided, the
+ volume will automatically be formatted to the specified filesystem type.
+ Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are
+ automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic,
+ and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted
+ volumes to other Droplets is not recommended.
+
+ snapshot_id: The unique identifier for the volume snapshot from which to create the volume.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ name: str,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ size_gigabytes: int,
+ description: str | Omit = omit,
+ filesystem_label: str | Omit = omit,
+ filesystem_type: str | Omit = omit,
+ snapshot_id: str | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeCreateResponse:
+ """To create a new volume, send a POST request to `/v2/volumes`.
+
+ Optionally, a
+ `filesystem_type` attribute may be provided in order to automatically format the
+ volume's filesystem. Pre-formatted volumes are automatically mounted when
+ attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created
+ on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without
+ support for auto-mounting is not recommended.
+
+ Args:
+ name: A human-readable name for the block storage volume. Must be lowercase and be
+ composed only of numbers, letters and "-", up to a limit of 64 characters. The
+ name must begin with a letter.
+
+ region: The slug identifier for the region where the resource will initially be
+ available.
+
+ size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply
+ when creating a volume from a snapshot.
+
+ description: An optional free-form text field to describe a block storage volume.
+
+ filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may
+ contain 16 characters while labels for xfs type filesystems are limited to 12
+ characters. May only be used in conjunction with filesystem_type.
+
+ filesystem_type: The name of the filesystem type to be used on the volume. When provided, the
+ volume will automatically be formatted to the specified filesystem type.
+ Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are
+ automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic,
+ and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted
+ volumes to other Droplets is not recommended.
+
+ snapshot_id: The unique identifier for the volume snapshot from which to create the volume.
+
+ tags: A flat array of tag names as strings to be applied to the resource. Tag names
+ may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["name", "region", "size_gigabytes"])
+ async def create(
+ self,
+ *,
+ name: str,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ],
+ size_gigabytes: int,
+ description: str | Omit = omit,
+ filesystem_label: str | Omit = omit,
+ filesystem_type: str | Omit = omit,
+ snapshot_id: str | Omit = omit,
+ tags: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeCreateResponse:
+ return await self._post(
+ "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ "size_gigabytes": size_gigabytes,
+ "description": description,
+ "filesystem_label": filesystem_label,
+ "filesystem_type": filesystem_type,
+ "snapshot_id": snapshot_id,
+ "tags": tags,
+ },
+ volume_create_params.VolumeCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VolumeCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ volume_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeRetrieveResponse:
+ """
+ To show information about a block storage volume, send a GET request to
+ `/v2/volumes/$VOLUME_ID`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ return await self._get(
+ f"/v2/volumes/{volume_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VolumeRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ name: str | Omit = omit,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VolumeListResponse:
+ """
+ To list all of the block storage volumes available on your account, send a GET
+ request to `/v2/volumes`.
+
+ ## Filtering Results
+
+ ### By Region
+
+ The `region` may be provided as query parameter in order to restrict results to
+ volumes available in a specific region. For example: `/v2/volumes?region=nyc1`
+
+ ### By Name
+
+ It is also possible to list volumes on your account that match a specified name.
+ To do so, send a GET request with the volume's name as a query parameter to
+ `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per
+ region with the same name.
+
+ ### By Name and Region
+
+ It is also possible to retrieve information about a block storage volume by
+ name. To do so, send a GET request with the volume's name and the region slug
+ for the region it is located in as query parameters to
+        `/v2/volumes?name=$VOLUME_NAME&region=nyc1`.
+
+ Args:
+ name: The block storage volume's name.
+
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ region: The slug identifier for the region where the resource is available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "name": name,
+ "page": page,
+ "per_page": per_page,
+ "region": region,
+ },
+ volume_list_params.VolumeListParams,
+ ),
+ ),
+ cast_to=VolumeListResponse,
+ )
+
+ async def delete(
+ self,
+ volume_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete a block storage volume, destroying all data and removing it from your
+ account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body
+ will be sent back, but the response code will indicate success. Specifically,
+ the response code will be a 204, which means that the action was successful with
+ no returned body data.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not volume_id:
+ raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/volumes/{volume_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/volumes/{volume_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ async def delete_by_name(
+ self,
+ *,
+ name: str | Omit = omit,
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Block storage volumes may also be deleted by name by sending a DELETE request
+ with the volume's **name** and the **region slug** for the region it is located
+        in as query parameters to `/v2/volumes?name=$VOLUME_NAME&region=nyc1`. No
+ response body will be sent back, but the response code will indicate success.
+ Specifically, the response code will be a 204, which means that the action was
+ successful with no returned body data.
+
+ Args:
+ name: The block storage volume's name.
+
+ region: The slug identifier for the region where the resource is available.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ },
+ volume_delete_by_name_params.VolumeDeleteByNameParams,
+ ),
+ ),
+ cast_to=NoneType,
+ )
+
+
+class VolumesResourceWithRawResponse:
+ def __init__(self, volumes: VolumesResource) -> None:
+ self._volumes = volumes
+
+ self.create = to_raw_response_wrapper(
+ volumes.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ volumes.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ volumes.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ volumes.delete,
+ )
+ self.delete_by_name = to_raw_response_wrapper(
+ volumes.delete_by_name,
+ )
+
+ @cached_property
+ def actions(self) -> ActionsResourceWithRawResponse:
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return ActionsResourceWithRawResponse(self._volumes.actions)
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResourceWithRawResponse:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+ return SnapshotsResourceWithRawResponse(self._volumes.snapshots)
+
+
+class AsyncVolumesResourceWithRawResponse:
+ def __init__(self, volumes: AsyncVolumesResource) -> None:
+ self._volumes = volumes
+
+ self.create = async_to_raw_response_wrapper(
+ volumes.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ volumes.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ volumes.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ volumes.delete,
+ )
+ self.delete_by_name = async_to_raw_response_wrapper(
+ volumes.delete_by_name,
+ )
+
+ @cached_property
+ def actions(self) -> AsyncActionsResourceWithRawResponse:
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return AsyncActionsResourceWithRawResponse(self._volumes.actions)
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+ return AsyncSnapshotsResourceWithRawResponse(self._volumes.snapshots)
+
+
+class VolumesResourceWithStreamingResponse:
+ def __init__(self, volumes: VolumesResource) -> None:
+ self._volumes = volumes
+
+ self.create = to_streamed_response_wrapper(
+ volumes.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ volumes.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ volumes.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ volumes.delete,
+ )
+ self.delete_by_name = to_streamed_response_wrapper(
+ volumes.delete_by_name,
+ )
+
+ @cached_property
+ def actions(self) -> ActionsResourceWithStreamingResponse:
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return ActionsResourceWithStreamingResponse(self._volumes.actions)
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResourceWithStreamingResponse:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+ return SnapshotsResourceWithStreamingResponse(self._volumes.snapshots)
+
+
+class AsyncVolumesResourceWithStreamingResponse:
+ def __init__(self, volumes: AsyncVolumesResource) -> None:
+ self._volumes = volumes
+
+ self.create = async_to_streamed_response_wrapper(
+ volumes.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ volumes.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ volumes.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ volumes.delete,
+ )
+ self.delete_by_name = async_to_streamed_response_wrapper(
+ volumes.delete_by_name,
+ )
+
+ @cached_property
+ def actions(self) -> AsyncActionsResourceWithStreamingResponse:
+ """
+ Block storage actions are commands that can be given to a DigitalOcean
+ Block Storage Volume. An example would be detaching or attaching a volume
+ from a Droplet. These requests are made on the
+ `/v2/volumes/$VOLUME_ID/actions` endpoint.
+
+ An action object is returned. These objects hold the current status of the
+ requested action.
+ """
+ return AsyncActionsResourceWithStreamingResponse(self._volumes.actions)
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse:
+ """
+ [DigitalOcean Block Storage Volumes](https://docs.digitalocean.com/products/volumes/)
+ provide expanded storage capacity for your Droplets and can be moved
+ between Droplets within a specific region.
+
+ Volumes function as raw block devices, meaning they appear to the
+ operating system as locally attached storage which can be formatted using
+ any file system supported by the OS. They may be created in sizes from
+ 1GiB to 16TiB.
+
+ By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+ delete volumes as well as attach and detach them from Droplets
+ """
+ return AsyncSnapshotsResourceWithStreamingResponse(self._volumes.snapshots)
diff --git a/src/gradient/resources/images.py b/src/gradient/resources/images.py
new file mode 100644
index 00000000..c790345a
--- /dev/null
+++ b/src/gradient/resources/images.py
@@ -0,0 +1,709 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ..types import image_generate_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import required_args, maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._streaming import Stream, AsyncStream
+from .._base_client import make_request_options
+from ..types.image_generate_response import ImageGenerateResponse
+from ..types.shared.image_gen_stream_event import ImageGenStreamEvent
+
+__all__ = ["ImagesResource", "AsyncImagesResource"]
+
+
+class ImagesResource(SyncAPIResource):
+ """Generate images from text prompts using various AI models."""
+
+ @cached_property
+ def with_raw_response(self) -> ImagesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ImagesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ImagesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ImagesResourceWithStreamingResponse(self)
+
+ @overload
+ def generate(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageGenerateResponse:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def generate(
+ self,
+ *,
+ prompt: str,
+ stream: Literal[True],
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Stream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def generate(
+ self,
+ *,
+ prompt: str,
+ stream: bool,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageGenerateResponse | Stream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["prompt"], ["prompt", "stream"])
+ def generate(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageGenerateResponse | Stream[ImageGenStreamEvent]:
+ if not self._client.model_access_key:
+ raise TypeError(
+                "Could not resolve authentication method. Expected model_access_key to be set for image generation."
+ )
+ headers = extra_headers or {}
+ headers = {
+ "Authorization": f"Bearer {self._client.model_access_key}",
+ **headers,
+ }
+
+ return self._post(
+ "/images/generations"
+ if self._client._base_url_overridden
+ else f"{self._client.inference_endpoint}/v1/images/generations",
+ body=maybe_transform(
+ {
+ "prompt": prompt,
+ "background": background,
+ "model": model,
+ "moderation": moderation,
+ "n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
+ "partial_images": partial_images,
+ "quality": quality,
+ "size": size,
+ "stream": stream,
+ "user": user,
+ },
+ image_generate_params.ImageGenerateParamsStreaming
+ if stream
+ else image_generate_params.ImageGenerateParamsNonStreaming,
+ ),
+ options=make_request_options(
+ extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageGenerateResponse,
+ stream=stream or False,
+ stream_cls=Stream[ImageGenStreamEvent],
+ )
+
+
+class AsyncImagesResource(AsyncAPIResource):
+ """Generate images from text prompts using various AI models."""
+
+ @cached_property
+ def with_raw_response(self) -> AsyncImagesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncImagesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncImagesResourceWithStreamingResponse(self)
+
+ @overload
+ async def generate(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageGenerateResponse:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def generate(
+ self,
+ *,
+ prompt: str,
+ stream: Literal[True],
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncStream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def generate(
+ self,
+ *,
+ prompt: str,
+ stream: bool,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageGenerateResponse | AsyncStream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["prompt"], ["prompt", "stream"])
+ async def generate(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ImageGenerateResponse | AsyncStream[ImageGenStreamEvent]:
+ if not self._client.model_access_key:
+ raise TypeError(
+                "Could not resolve authentication method. Expected model_access_key to be set for image generation."
+ )
+ headers = extra_headers or {}
+ headers = {
+ "Authorization": f"Bearer {self._client.model_access_key}",
+ **headers,
+ }
+ return await self._post(
+ "/images/generations"
+ if self._client._base_url_overridden
+ else f"{self._client.inference_endpoint}/v1/images/generations",
+ body=await async_maybe_transform(
+ {
+ "prompt": prompt,
+ "background": background,
+ "model": model,
+ "moderation": moderation,
+ "n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
+ "partial_images": partial_images,
+ "quality": quality,
+ "size": size,
+ "stream": stream,
+ "user": user,
+ },
+ image_generate_params.ImageGenerateParamsStreaming
+ if stream
+ else image_generate_params.ImageGenerateParamsNonStreaming,
+ ),
+ options=make_request_options(
+ extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImageGenerateResponse,
+ stream=stream or False,
+ stream_cls=AsyncStream[ImageGenStreamEvent],
+ )
+
+
+class ImagesResourceWithRawResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
+
+ self.generate = to_raw_response_wrapper(
+ images.generate,
+ )
+
+
+class AsyncImagesResourceWithRawResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
+
+ self.generate = async_to_raw_response_wrapper(
+ images.generate,
+ )
+
+
+class ImagesResourceWithStreamingResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
+
+ self.generate = to_streamed_response_wrapper(
+ images.generate,
+ )
+
+
+class AsyncImagesResourceWithStreamingResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
+
+ self.generate = async_to_streamed_response_wrapper(
+ images.generate,
+ )
diff --git a/src/gradient/resources/inference/__init__.py b/src/gradient/resources/inference/__init__.py
new file mode 100644
index 00000000..21798ab2
--- /dev/null
+++ b/src/gradient/resources/inference/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .api_keys import (
+ APIKeysResource,
+ AsyncAPIKeysResource,
+ APIKeysResourceWithRawResponse,
+ AsyncAPIKeysResourceWithRawResponse,
+ APIKeysResourceWithStreamingResponse,
+ AsyncAPIKeysResourceWithStreamingResponse,
+)
+from .inference import (
+ InferenceResource,
+ AsyncInferenceResource,
+ InferenceResourceWithRawResponse,
+ AsyncInferenceResourceWithRawResponse,
+ InferenceResourceWithStreamingResponse,
+ AsyncInferenceResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "APIKeysResource",
+ "AsyncAPIKeysResource",
+ "APIKeysResourceWithRawResponse",
+ "AsyncAPIKeysResourceWithRawResponse",
+ "APIKeysResourceWithStreamingResponse",
+ "AsyncAPIKeysResourceWithStreamingResponse",
+ "InferenceResource",
+ "AsyncInferenceResource",
+ "InferenceResourceWithRawResponse",
+ "AsyncInferenceResourceWithRawResponse",
+ "InferenceResourceWithStreamingResponse",
+ "AsyncInferenceResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/inference/api_keys.py b/src/gradient/resources/inference/api_keys.py
new file mode 100644
index 00000000..045c6f41
--- /dev/null
+++ b/src/gradient/resources/inference/api_keys.py
@@ -0,0 +1,569 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.inference import api_key_list_params, api_key_create_params, api_key_update_params
+from ...types.inference.api_key_list_response import APIKeyListResponse
+from ...types.inference.api_key_create_response import APIKeyCreateResponse
+from ...types.inference.api_key_delete_response import APIKeyDeleteResponse
+from ...types.inference.api_key_update_response import APIKeyUpdateResponse
+from ...types.inference.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse
+
+__all__ = ["APIKeysResource", "AsyncAPIKeysResource"]
+
+
+class APIKeysResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> APIKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return APIKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return APIKeysResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyCreateResponse:
+ """
+ To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`.
+
+ Args:
+ name: A human friendly name to identify the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/models/api_keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
+ body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyCreateResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyUpdateResponse:
+ """
+ To update a model API key, send a PUT request to
+ `/v2/gen-ai/models/api_keys/{api_key_uuid}`.
+
+ Args:
+ body_api_key_uuid: API key ID
+
+ name: Name
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ api_key_update_params.APIKeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyListResponse:
+ """
+ To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/models/api_keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ api_key_list_params.APIKeyListParams,
+ ),
+ ),
+ cast_to=APIKeyListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyDeleteResponse:
+ """
+ To delete an API key for a model, send a DELETE request to
+ `/v2/gen-ai/models/api_keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/models/api_keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyDeleteResponse,
+ )
+
+ def update_regenerate(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyUpdateRegenerateResponse:
+ """
+ To regenerate a model API key, send a PUT request to
+ `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyUpdateRegenerateResponse,
+ )
+
+
+class AsyncAPIKeysResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAPIKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncAPIKeysResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyCreateResponse:
+ """
+ To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`.
+
+ Args:
+ name: A human friendly name to identify the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/models/api_keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
+ body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyCreateResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyUpdateResponse:
+ """
+ To update a model API key, send a PUT request to
+ `/v2/gen-ai/models/api_keys/{api_key_uuid}`.
+
+ Args:
+ body_api_key_uuid: API key ID
+
+ name: Name
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ api_key_update_params.APIKeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyListResponse:
+ """
+ To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/models/api_keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ api_key_list_params.APIKeyListParams,
+ ),
+ ),
+ cast_to=APIKeyListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyDeleteResponse:
+ """
+ To delete an API key for a model, send a DELETE request to
+ `/v2/gen-ai/models/api_keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/models/api_keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyDeleteResponse,
+ )
+
+ async def update_regenerate(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> APIKeyUpdateRegenerateResponse:
+ """
+ To regenerate a model API key, send a PUT request to
+ `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=APIKeyUpdateRegenerateResponse,
+ )
+
+
+class APIKeysResourceWithRawResponse:
+ def __init__(self, api_keys: APIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = to_raw_response_wrapper(
+ api_keys.create,
+ )
+ self.update = to_raw_response_wrapper(
+ api_keys.update,
+ )
+ self.list = to_raw_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ api_keys.delete,
+ )
+ self.update_regenerate = to_raw_response_wrapper(
+ api_keys.update_regenerate,
+ )
+
+
+class AsyncAPIKeysResourceWithRawResponse:
+ def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = async_to_raw_response_wrapper(
+ api_keys.create,
+ )
+ self.update = async_to_raw_response_wrapper(
+ api_keys.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ api_keys.delete,
+ )
+ self.update_regenerate = async_to_raw_response_wrapper(
+ api_keys.update_regenerate,
+ )
+
+
+class APIKeysResourceWithStreamingResponse:
+ def __init__(self, api_keys: APIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = to_streamed_response_wrapper(
+ api_keys.create,
+ )
+ self.update = to_streamed_response_wrapper(
+ api_keys.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ api_keys.delete,
+ )
+ self.update_regenerate = to_streamed_response_wrapper(
+ api_keys.update_regenerate,
+ )
+
+
+class AsyncAPIKeysResourceWithStreamingResponse:
+ def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
+ self._api_keys = api_keys
+
+ self.create = async_to_streamed_response_wrapper(
+ api_keys.create,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ api_keys.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ api_keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ api_keys.delete,
+ )
+ self.update_regenerate = async_to_streamed_response_wrapper(
+ api_keys.update_regenerate,
+ )
diff --git a/src/gradient/resources/inference/inference.py b/src/gradient/resources/inference/inference.py
new file mode 100644
index 00000000..1da78154
--- /dev/null
+++ b/src/gradient/resources/inference/inference.py
@@ -0,0 +1,120 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .api_keys import (
+ APIKeysResource,
+ AsyncAPIKeysResource,
+ APIKeysResourceWithRawResponse,
+ AsyncAPIKeysResourceWithRawResponse,
+ APIKeysResourceWithStreamingResponse,
+ AsyncAPIKeysResourceWithStreamingResponse,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["InferenceResource", "AsyncInferenceResource"]
+
+
+class InferenceResource(SyncAPIResource):
+ @cached_property
+ def api_keys(self) -> APIKeysResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return APIKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> InferenceResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return InferenceResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> InferenceResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return InferenceResourceWithStreamingResponse(self)
+
+
+class AsyncInferenceResource(AsyncAPIResource):
+ @cached_property
+ def api_keys(self) -> AsyncAPIKeysResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAPIKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncInferenceResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncInferenceResourceWithStreamingResponse(self)
+
+
+class InferenceResourceWithRawResponse:
+ def __init__(self, inference: InferenceResource) -> None:
+ self._inference = inference
+
+ @cached_property
+ def api_keys(self) -> APIKeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return APIKeysResourceWithRawResponse(self._inference.api_keys)
+
+
+class AsyncInferenceResourceWithRawResponse:
+ def __init__(self, inference: AsyncInferenceResource) -> None:
+ self._inference = inference
+
+ @cached_property
+ def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAPIKeysResourceWithRawResponse(self._inference.api_keys)
+
+
+class InferenceResourceWithStreamingResponse:
+ def __init__(self, inference: InferenceResource) -> None:
+ self._inference = inference
+
+ @cached_property
+ def api_keys(self) -> APIKeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return APIKeysResourceWithStreamingResponse(self._inference.api_keys)
+
+
+class AsyncInferenceResourceWithStreamingResponse:
+ def __init__(self, inference: AsyncInferenceResource) -> None:
+ self._inference = inference
+
+ @cached_property
+ def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAPIKeysResourceWithStreamingResponse(self._inference.api_keys)
diff --git a/src/gradient/resources/knowledge_bases/__init__.py b/src/gradient/resources/knowledge_bases/__init__.py
new file mode 100644
index 00000000..353dc05c
--- /dev/null
+++ b/src/gradient/resources/knowledge_bases/__init__.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .data_sources import (
+ DataSourcesResource,
+ AsyncDataSourcesResource,
+ DataSourcesResourceWithRawResponse,
+ AsyncDataSourcesResourceWithRawResponse,
+ DataSourcesResourceWithStreamingResponse,
+ AsyncDataSourcesResourceWithStreamingResponse,
+)
+from .indexing_jobs import (
+ IndexingJobsResource,
+ AsyncIndexingJobsResource,
+ IndexingJobsResourceWithRawResponse,
+ AsyncIndexingJobsResourceWithRawResponse,
+ IndexingJobsResourceWithStreamingResponse,
+ AsyncIndexingJobsResourceWithStreamingResponse,
+)
+from .knowledge_bases import (
+ KnowledgeBasesResource,
+ KnowledgeBaseTimeoutError,
+ KnowledgeBaseDatabaseError,
+ AsyncKnowledgeBasesResource,
+ KnowledgeBasesResourceWithRawResponse,
+ AsyncKnowledgeBasesResourceWithRawResponse,
+ KnowledgeBasesResourceWithStreamingResponse,
+ AsyncKnowledgeBasesResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "DataSourcesResource",
+ "AsyncDataSourcesResource",
+ "DataSourcesResourceWithRawResponse",
+ "AsyncDataSourcesResourceWithRawResponse",
+ "DataSourcesResourceWithStreamingResponse",
+ "AsyncDataSourcesResourceWithStreamingResponse",
+ "IndexingJobsResource",
+ "AsyncIndexingJobsResource",
+ "IndexingJobsResourceWithRawResponse",
+ "AsyncIndexingJobsResourceWithRawResponse",
+ "IndexingJobsResourceWithStreamingResponse",
+ "AsyncIndexingJobsResourceWithStreamingResponse",
+ "KnowledgeBasesResource",
+ "AsyncKnowledgeBasesResource",
+ "KnowledgeBaseDatabaseError",
+ "KnowledgeBaseTimeoutError",
+ "KnowledgeBasesResourceWithRawResponse",
+ "AsyncKnowledgeBasesResourceWithRawResponse",
+ "KnowledgeBasesResourceWithStreamingResponse",
+ "AsyncKnowledgeBasesResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/knowledge_bases/data_sources.py b/src/gradient/resources/knowledge_bases/data_sources.py
new file mode 100644
index 00000000..5c6eafd2
--- /dev/null
+++ b/src/gradient/resources/knowledge_bases/data_sources.py
@@ -0,0 +1,754 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.knowledge_bases import (
+ data_source_list_params,
+ data_source_create_params,
+ data_source_update_params,
+ data_source_create_presigned_urls_params,
+)
+from ...types.knowledge_bases.aws_data_source_param import AwsDataSourceParam
+from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse
+from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse
+from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse
+from ...types.knowledge_bases.data_source_update_response import DataSourceUpdateResponse
+from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam
+from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
+from ...types.knowledge_bases.data_source_create_presigned_urls_response import DataSourceCreatePresignedURLsResponse
+
+__all__ = ["DataSourcesResource", "AsyncDataSourcesResource"]
+
+
+class DataSourcesResource(SyncAPIResource):
+    """
+    The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> DataSourcesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return DataSourcesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return DataSourcesResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        path_knowledge_base_uuid: str,
+        *,
+        aws_data_source: AwsDataSourceParam | Omit = omit,
+        chunking_algorithm: Literal[
+            "CHUNKING_ALGORITHM_UNKNOWN",
+            "CHUNKING_ALGORITHM_SECTION_BASED",
+            "CHUNKING_ALGORITHM_HIERARCHICAL",
+            "CHUNKING_ALGORITHM_SEMANTIC",
+            "CHUNKING_ALGORITHM_FIXED_LENGTH",
+        ]
+        | Omit = omit,
+        chunking_options: data_source_create_params.ChunkingOptions | Omit = omit,
+        body_knowledge_base_uuid: str | Omit = omit,
+        spaces_data_source: APISpacesDataSourceParam | Omit = omit,
+        web_crawler_data_source: APIWebCrawlerDataSourceParam | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceCreateResponse:
+        """
+        To add a data source to a knowledge base, send a POST request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`.
+
+        Args:
+          aws_data_source: AWS S3 Data Source
+
+          chunking_algorithm: The chunking algorithm to use for processing data sources.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          chunking_options: Configuration options for the chunking algorithm.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          body_knowledge_base_uuid: Knowledge base id
+
+          spaces_data_source: Spaces Bucket Data Source
+
+          web_crawler_data_source: WebCrawlerDataSource
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # `path_knowledge_base_uuid` fills the URL path; the optional
+        # `body_knowledge_base_uuid` is carried in the JSON payload instead.
+        if not path_knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}"
+            )
+        # Use the relative path when the client's base URL has been overridden;
+        # otherwise call the public DigitalOcean API host directly.
+        return self._post(
+            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources",
+            body=maybe_transform(
+                {
+                    "aws_data_source": aws_data_source,
+                    "chunking_algorithm": chunking_algorithm,
+                    "chunking_options": chunking_options,
+                    "body_knowledge_base_uuid": body_knowledge_base_uuid,
+                    "spaces_data_source": spaces_data_source,
+                    "web_crawler_data_source": web_crawler_data_source,
+                },
+                data_source_create_params.DataSourceCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceCreateResponse,
+        )
+
+    def update(
+        self,
+        path_data_source_uuid: str,
+        *,
+        path_knowledge_base_uuid: str,
+        chunking_algorithm: Literal[
+            "CHUNKING_ALGORITHM_UNKNOWN",
+            "CHUNKING_ALGORITHM_SECTION_BASED",
+            "CHUNKING_ALGORITHM_HIERARCHICAL",
+            "CHUNKING_ALGORITHM_SEMANTIC",
+            "CHUNKING_ALGORITHM_FIXED_LENGTH",
+        ]
+        | Omit = omit,
+        chunking_options: data_source_update_params.ChunkingOptions | Omit = omit,
+        body_data_source_uuid: str | Omit = omit,
+        body_knowledge_base_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceUpdateResponse:
+        """To update a data source (e.g.
+
+        chunking options), send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`.
+
+        Args:
+          chunking_algorithm: The chunking algorithm to use for processing data sources.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          chunking_options: Configuration options for the chunking algorithm.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          body_data_source_uuid: Data Source ID (Path Parameter)
+
+          body_knowledge_base_uuid: Knowledge Base ID (Path Parameter)
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # The `path_*` arguments fill the URL path; the `body_*` duplicates are
+        # optional JSON-body fields.
+        if not path_knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}"
+            )
+        if not path_data_source_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_data_source_uuid` but received {path_data_source_uuid!r}"
+            )
+        # Relative path when the base URL is overridden; else the public API host.
+        return self._put(
+            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources/{path_data_source_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources/{path_data_source_uuid}",
+            body=maybe_transform(
+                {
+                    "chunking_algorithm": chunking_algorithm,
+                    "chunking_options": chunking_options,
+                    "body_data_source_uuid": body_data_source_uuid,
+                    "body_knowledge_base_uuid": body_knowledge_base_uuid,
+                },
+                data_source_update_params.DataSourceUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceUpdateResponse,
+        )
+
+    def list(
+        self,
+        knowledge_base_uuid: str,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceListResponse:
+        """
+        To list all data sources for a knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        # Relative path when the base URL is overridden; else the public API host.
+        return self._get(
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                # Pagination parameters travel in the query string, not the body.
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    data_source_list_params.DataSourceListParams,
+                ),
+            ),
+            cast_to=DataSourceListResponse,
+        )
+
+    def delete(
+        self,
+        data_source_uuid: str,
+        *,
+        knowledge_base_uuid: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceDeleteResponse:
+        """
+        To delete a data source from a knowledge base, send a DELETE request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        if not data_source_uuid:
+            raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}")
+        # Relative path when the base URL is overridden; else the public API host.
+        return self._delete(
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceDeleteResponse,
+        )
+
+    def create_presigned_urls(
+        self,
+        *,
+        files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceCreatePresignedURLsResponse:
+        """
+        To create presigned URLs for knowledge base data source file upload, send a POST
+        request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`.
+
+        Args:
+          files: A list of files to generate presigned URLs for.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Relative path when the base URL is overridden; else the public API host.
+        return self._post(
+            "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls",
+            body=maybe_transform(
+                {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceCreatePresignedURLsResponse,
+        )
+
+
+class AsyncDataSourcesResource(AsyncAPIResource):
+    """
+    The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+    """
+
+    @cached_property
+    def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncDataSourcesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+        """
+        return AsyncDataSourcesResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        path_knowledge_base_uuid: str,
+        *,
+        aws_data_source: AwsDataSourceParam | Omit = omit,
+        chunking_algorithm: Literal[
+            "CHUNKING_ALGORITHM_UNKNOWN",
+            "CHUNKING_ALGORITHM_SECTION_BASED",
+            "CHUNKING_ALGORITHM_HIERARCHICAL",
+            "CHUNKING_ALGORITHM_SEMANTIC",
+            "CHUNKING_ALGORITHM_FIXED_LENGTH",
+        ]
+        | Omit = omit,
+        chunking_options: data_source_create_params.ChunkingOptions | Omit = omit,
+        body_knowledge_base_uuid: str | Omit = omit,
+        spaces_data_source: APISpacesDataSourceParam | Omit = omit,
+        web_crawler_data_source: APIWebCrawlerDataSourceParam | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceCreateResponse:
+        """
+        To add a data source to a knowledge base, send a POST request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`.
+
+        Args:
+          aws_data_source: AWS S3 Data Source
+
+          chunking_algorithm: The chunking algorithm to use for processing data sources.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          chunking_options: Configuration options for the chunking algorithm.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          body_knowledge_base_uuid: Knowledge base id
+
+          spaces_data_source: Spaces Bucket Data Source
+
+          web_crawler_data_source: WebCrawlerDataSource
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # `path_knowledge_base_uuid` fills the URL path; the optional
+        # `body_knowledge_base_uuid` is carried in the JSON payload instead.
+        if not path_knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}"
+            )
+        # Use the relative path when the client's base URL has been overridden;
+        # otherwise call the public DigitalOcean API host directly.
+        return await self._post(
+            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources",
+            body=await async_maybe_transform(
+                {
+                    "aws_data_source": aws_data_source,
+                    "chunking_algorithm": chunking_algorithm,
+                    "chunking_options": chunking_options,
+                    "body_knowledge_base_uuid": body_knowledge_base_uuid,
+                    "spaces_data_source": spaces_data_source,
+                    "web_crawler_data_source": web_crawler_data_source,
+                },
+                data_source_create_params.DataSourceCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceCreateResponse,
+        )
+
+    async def update(
+        self,
+        path_data_source_uuid: str,
+        *,
+        path_knowledge_base_uuid: str,
+        chunking_algorithm: Literal[
+            "CHUNKING_ALGORITHM_UNKNOWN",
+            "CHUNKING_ALGORITHM_SECTION_BASED",
+            "CHUNKING_ALGORITHM_HIERARCHICAL",
+            "CHUNKING_ALGORITHM_SEMANTIC",
+            "CHUNKING_ALGORITHM_FIXED_LENGTH",
+        ]
+        | Omit = omit,
+        chunking_options: data_source_update_params.ChunkingOptions | Omit = omit,
+        body_data_source_uuid: str | Omit = omit,
+        body_knowledge_base_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceUpdateResponse:
+        """To update a data source (e.g.
+
+        chunking options), send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`.
+
+        Args:
+          chunking_algorithm: The chunking algorithm to use for processing data sources.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          chunking_options: Configuration options for the chunking algorithm.
+
+              **Note: This feature requires enabling the knowledgebase enhancements feature
+              preview flag.**
+
+          body_data_source_uuid: Data Source ID (Path Parameter)
+
+          body_knowledge_base_uuid: Knowledge Base ID (Path Parameter)
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # The `path_*` arguments fill the URL path; the `body_*` duplicates are
+        # optional JSON-body fields.
+        if not path_knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}"
+            )
+        if not path_data_source_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_data_source_uuid` but received {path_data_source_uuid!r}"
+            )
+        # Relative path when the base URL is overridden; else the public API host.
+        return await self._put(
+            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources/{path_data_source_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources/{path_data_source_uuid}",
+            body=await async_maybe_transform(
+                {
+                    "chunking_algorithm": chunking_algorithm,
+                    "chunking_options": chunking_options,
+                    "body_data_source_uuid": body_data_source_uuid,
+                    "body_knowledge_base_uuid": body_knowledge_base_uuid,
+                },
+                data_source_update_params.DataSourceUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceUpdateResponse,
+        )
+
+    async def list(
+        self,
+        knowledge_base_uuid: str,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceListResponse:
+        """
+        To list all data sources for a knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        # Relative path when the base URL is overridden; else the public API host.
+        return await self._get(
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                # Pagination parameters travel in the query string, not the body.
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    data_source_list_params.DataSourceListParams,
+                ),
+            ),
+            cast_to=DataSourceListResponse,
+        )
+
+    async def delete(
+        self,
+        data_source_uuid: str,
+        *,
+        knowledge_base_uuid: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceDeleteResponse:
+        """
+        To delete a data source from a knowledge base, send a DELETE request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        if not data_source_uuid:
+            raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}")
+        # Relative path when the base URL is overridden; else the public API host.
+        return await self._delete(
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceDeleteResponse,
+        )
+
+    async def create_presigned_urls(
+        self,
+        *,
+        files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> DataSourceCreatePresignedURLsResponse:
+        """
+        To create presigned URLs for knowledge base data source file upload, send a POST
+        request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`.
+
+        Args:
+          files: A list of files to generate presigned URLs for.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Relative path when the base URL is overridden; else the public API host.
+        return await self._post(
+            "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls",
+            body=await async_maybe_transform(
+                {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DataSourceCreatePresignedURLsResponse,
+        )
+
+
+class DataSourcesResourceWithRawResponse:
+ def __init__(self, data_sources: DataSourcesResource) -> None:
+ self._data_sources = data_sources
+
+ self.create = to_raw_response_wrapper(
+ data_sources.create,
+ )
+ self.update = to_raw_response_wrapper(
+ data_sources.update,
+ )
+ self.list = to_raw_response_wrapper(
+ data_sources.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ data_sources.delete,
+ )
+ self.create_presigned_urls = to_raw_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
+
+
+class AsyncDataSourcesResourceWithRawResponse:
+ def __init__(self, data_sources: AsyncDataSourcesResource) -> None:
+ self._data_sources = data_sources
+
+ self.create = async_to_raw_response_wrapper(
+ data_sources.create,
+ )
+ self.update = async_to_raw_response_wrapper(
+ data_sources.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ data_sources.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ data_sources.delete,
+ )
+ self.create_presigned_urls = async_to_raw_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
+
+
+class DataSourcesResourceWithStreamingResponse:
+ def __init__(self, data_sources: DataSourcesResource) -> None:
+ self._data_sources = data_sources
+
+ self.create = to_streamed_response_wrapper(
+ data_sources.create,
+ )
+ self.update = to_streamed_response_wrapper(
+ data_sources.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ data_sources.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ data_sources.delete,
+ )
+ self.create_presigned_urls = to_streamed_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
+
+
+class AsyncDataSourcesResourceWithStreamingResponse:
+ def __init__(self, data_sources: AsyncDataSourcesResource) -> None:
+ self._data_sources = data_sources
+
+ self.create = async_to_streamed_response_wrapper(
+ data_sources.create,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ data_sources.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ data_sources.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ data_sources.delete,
+ )
+ self.create_presigned_urls = async_to_streamed_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
diff --git a/src/gradient/resources/knowledge_bases/indexing_jobs.py b/src/gradient/resources/knowledge_bases/indexing_jobs.py
new file mode 100644
index 00000000..47db5b10
--- /dev/null
+++ b/src/gradient/resources/knowledge_bases/indexing_jobs.py
@@ -0,0 +1,886 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import time
+import asyncio
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._exceptions import IndexingJobError, IndexingJobTimeoutError
+from ..._base_client import make_request_options
+from ...types.knowledge_bases import (
+ indexing_job_list_params,
+ indexing_job_create_params,
+ indexing_job_update_cancel_params,
+)
+from ...types.knowledge_bases.indexing_job_list_response import IndexingJobListResponse
+from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse
+from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse
+from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse
+from ...types.knowledge_bases.indexing_job_retrieve_signed_url_response import IndexingJobRetrieveSignedURLResponse
+from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse
+
+__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"]
+
+
+class IndexingJobsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> IndexingJobsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return IndexingJobsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return IndexingJobsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ data_source_uuids: SequenceNotStr[str] | Omit = omit,
+ knowledge_base_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobCreateResponse:
+ """
+ To start an indexing job for a knowledge base, send a POST request to
+ `/v2/gen-ai/indexing_jobs`.
+
+ Args:
+ data_source_uuids: List of data source ids to index, if none are provided, all data sources will be
+ indexed
+
+ knowledge_base_uuid: Knowledge base id
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/indexing_jobs"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
+ body=maybe_transform(
+ {
+ "data_source_uuids": data_source_uuids,
+ "knowledge_base_uuid": knowledge_base_uuid,
+ },
+ indexing_job_create_params.IndexingJobCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveResponse:
+ """
+ To get status of an indexing Job for a knowledge base, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/indexing_jobs/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobListResponse:
+ """
+ To list all indexing jobs for a knowledge base, send a GET request to
+ `/v2/gen-ai/indexing_jobs`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/indexing_jobs"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ indexing_job_list_params.IndexingJobListParams,
+ ),
+ ),
+ cast_to=IndexingJobListResponse,
+ )
+
+ def retrieve_data_sources(
+ self,
+ indexing_job_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveDataSourcesResponse:
+ """
+ To list all datasources for an indexing job, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not indexing_job_uuid:
+ raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveDataSourcesResponse,
+ )
+
+ def retrieve_signed_url(
+ self,
+ indexing_job_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveSignedURLResponse:
+ """
+ To get a signed URL for indexing job details, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not indexing_job_uuid:
+ raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveSignedURLResponse,
+ )
+
+ def update_cancel(
+ self,
+ path_uuid: str,
+ *,
+ body_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobUpdateCancelResponse:
+ """
+ To cancel an indexing job for a knowledge base, send a PUT request to
+ `/v2/gen-ai/indexing_jobs/{uuid}/cancel`.
+
+ Args:
+ body_uuid: A unique identifier for an indexing job.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{path_uuid}/cancel",
+ body=maybe_transform(
+ {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobUpdateCancelResponse,
+ )
+
+ def wait_for_completion(
+ self,
+ uuid: str,
+ *,
+ poll_interval: float = 5,
+ timeout: float | None = None,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ request_timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveResponse:
+ """
+ Wait for an indexing job to complete by polling its status.
+
+ This method polls the indexing job status at regular intervals until it reaches
+ a terminal state (succeeded, failed, error, or cancelled). It raises an exception
+ if the job fails or times out.
+
+ Args:
+ uuid: The UUID of the indexing job to wait for.
+
+ poll_interval: Time in seconds between status checks (default: 5 seconds).
+
+ timeout: Maximum time in seconds to wait for completion. If None, waits indefinitely.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ request_timeout: Override the client-level default timeout for this request, in seconds
+
+ Returns:
+ The final IndexingJobRetrieveResponse when the job completes successfully.
+
+ Raises:
+ IndexingJobTimeoutError: If the job doesn't complete within the specified timeout.
+ IndexingJobError: If the job fails, errors, or is cancelled.
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+
+ start_time = time.time()
+
+ while True:
+ response = self.retrieve(
+ uuid,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=request_timeout,
+ )
+
+ # Check if job is in a terminal state
+ if response.job and response.job.phase:
+ phase = response.job.phase
+
+ # Success state
+ if phase == "BATCH_JOB_PHASE_SUCCEEDED":
+ return response
+
+ # Failure states
+ if phase == "BATCH_JOB_PHASE_FAILED":
+ raise IndexingJobError(
+ f"Indexing job {uuid} failed. ",
+ uuid=uuid,
+ phase=phase,
+ )
+
+ if phase == "BATCH_JOB_PHASE_ERROR":
+ raise IndexingJobError(
+ f"Indexing job {uuid} encountered an error",
+ uuid=uuid,
+ phase=phase,
+ )
+
+ if phase == "BATCH_JOB_PHASE_CANCELLED":
+ raise IndexingJobError(
+ f"Indexing job {uuid} was cancelled",
+ uuid=uuid,
+ phase=phase,
+ )
+
+ # Still in progress (UNKNOWN, PENDING, or RUNNING)
+ # Check timeout
+ if timeout is not None:
+ elapsed = time.time() - start_time
+ if elapsed >= timeout:
+ raise IndexingJobTimeoutError(
+ f"Indexing job {uuid} did not complete within {timeout} seconds. Current phase: {phase}",
+ uuid=uuid,
+ phase=phase,
+ timeout=timeout,
+ )
+
+ # Wait before next poll
+ time.sleep(poll_interval)
+
+
+class AsyncIndexingJobsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncIndexingJobsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncIndexingJobsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ data_source_uuids: SequenceNotStr[str] | Omit = omit,
+ knowledge_base_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobCreateResponse:
+ """
+ To start an indexing job for a knowledge base, send a POST request to
+ `/v2/gen-ai/indexing_jobs`.
+
+ Args:
+ data_source_uuids: List of data source ids to index, if none are provided, all data sources will be
+ indexed
+
+ knowledge_base_uuid: Knowledge base id
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/indexing_jobs"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
+ body=await async_maybe_transform(
+ {
+ "data_source_uuids": data_source_uuids,
+ "knowledge_base_uuid": knowledge_base_uuid,
+ },
+ indexing_job_create_params.IndexingJobCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveResponse:
+ """
+ To get status of an indexing Job for a knowledge base, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/indexing_jobs/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobListResponse:
+ """
+ To list all indexing jobs for a knowledge base, send a GET request to
+ `/v2/gen-ai/indexing_jobs`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/indexing_jobs"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ indexing_job_list_params.IndexingJobListParams,
+ ),
+ ),
+ cast_to=IndexingJobListResponse,
+ )
+
+ async def retrieve_data_sources(
+ self,
+ indexing_job_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveDataSourcesResponse:
+ """
+ To list all datasources for an indexing job, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not indexing_job_uuid:
+ raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveDataSourcesResponse,
+ )
+
+ async def retrieve_signed_url(
+ self,
+ indexing_job_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveSignedURLResponse:
+ """
+ To get a signed URL for indexing job details, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not indexing_job_uuid:
+ raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveSignedURLResponse,
+ )
+
+ async def update_cancel(
+ self,
+ path_uuid: str,
+ *,
+ body_uuid: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobUpdateCancelResponse:
+ """
+ To cancel an indexing job for a knowledge base, send a PUT request to
+ `/v2/gen-ai/indexing_jobs/{uuid}/cancel`.
+
+ Args:
+ body_uuid: A unique identifier for an indexing job.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{path_uuid}/cancel",
+ body=await async_maybe_transform(
+ {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobUpdateCancelResponse,
+ )
+
+ async def wait_for_completion(
+ self,
+ uuid: str,
+ *,
+ poll_interval: float = 5,
+ timeout: float | None = None,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ request_timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveResponse:
+ """
+ Wait for an indexing job to complete by polling its status.
+
+ This method polls the indexing job status at regular intervals until it reaches
+ a terminal state (succeeded, failed, error, or cancelled). It raises an exception
+ if the job fails or times out.
+
+ Args:
+ uuid: The UUID of the indexing job to wait for.
+
+ poll_interval: Time in seconds between status checks (default: 5 seconds).
+
+ timeout: Maximum time in seconds to wait for completion. If None, waits indefinitely.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ request_timeout: Override the client-level default timeout for this request, in seconds
+
+ Returns:
+ The final IndexingJobRetrieveResponse when the job completes successfully.
+
+ Raises:
+ IndexingJobTimeoutError: If the job doesn't complete within the specified timeout.
+ IndexingJobError: If the job fails, errors, or is cancelled.
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+
+ start_time = time.time()
+
+ while True:
+ response = await self.retrieve(
+ uuid,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=request_timeout,
+ )
+
+ # Check if job is in a terminal state
+ if response.job and response.job.phase:
+ phase = response.job.phase
+
+ # Success state
+ if phase == "BATCH_JOB_PHASE_SUCCEEDED":
+ return response
+
+ # Failure states
+ if phase == "BATCH_JOB_PHASE_FAILED":
+ raise IndexingJobError(
+ f"Indexing job {uuid} failed. ",
+ uuid=uuid,
+ phase=phase,
+ )
+
+ if phase == "BATCH_JOB_PHASE_ERROR":
+ raise IndexingJobError(
+ f"Indexing job {uuid} encountered an error",
+ uuid=uuid,
+ phase=phase,
+ )
+
+ if phase == "BATCH_JOB_PHASE_CANCELLED":
+ raise IndexingJobError(
+ f"Indexing job {uuid} was cancelled",
+ uuid=uuid,
+ phase=phase,
+ )
+
+ # Still in progress (UNKNOWN, PENDING, or RUNNING)
+ # Check timeout
+ if timeout is not None:
+ elapsed = time.time() - start_time
+ if elapsed >= timeout:
+ raise IndexingJobTimeoutError(
+ f"Indexing job {uuid} did not complete within {timeout} seconds. Current phase: {phase}",
+ uuid=uuid,
+ phase=phase,
+ timeout=timeout,
+ )
+
+ # Wait before next poll
+ await asyncio.sleep(poll_interval)
+
+
+class IndexingJobsResourceWithRawResponse:
+ def __init__(self, indexing_jobs: IndexingJobsResource) -> None:
+ self._indexing_jobs = indexing_jobs
+
+ self.create = to_raw_response_wrapper(
+ indexing_jobs.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ indexing_jobs.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ indexing_jobs.list,
+ )
+ self.retrieve_data_sources = to_raw_response_wrapper(
+ indexing_jobs.retrieve_data_sources,
+ )
+ self.retrieve_signed_url = to_raw_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
+ self.update_cancel = to_raw_response_wrapper(
+ indexing_jobs.update_cancel,
+ )
+ self.wait_for_completion = to_raw_response_wrapper(
+ indexing_jobs.wait_for_completion,
+ )
+
+
+class AsyncIndexingJobsResourceWithRawResponse:
+ def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None:
+ self._indexing_jobs = indexing_jobs
+
+ self.create = async_to_raw_response_wrapper(
+ indexing_jobs.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ indexing_jobs.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ indexing_jobs.list,
+ )
+ self.retrieve_data_sources = async_to_raw_response_wrapper(
+ indexing_jobs.retrieve_data_sources,
+ )
+ self.retrieve_signed_url = async_to_raw_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
+ self.update_cancel = async_to_raw_response_wrapper(
+ indexing_jobs.update_cancel,
+ )
+ self.wait_for_completion = async_to_raw_response_wrapper(
+ indexing_jobs.wait_for_completion,
+ )
+
+
+class IndexingJobsResourceWithStreamingResponse:
+ def __init__(self, indexing_jobs: IndexingJobsResource) -> None:
+ self._indexing_jobs = indexing_jobs
+
+ self.create = to_streamed_response_wrapper(
+ indexing_jobs.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ indexing_jobs.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ indexing_jobs.list,
+ )
+ self.retrieve_data_sources = to_streamed_response_wrapper(
+ indexing_jobs.retrieve_data_sources,
+ )
+ self.retrieve_signed_url = to_streamed_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
+ self.update_cancel = to_streamed_response_wrapper(
+ indexing_jobs.update_cancel,
+ )
+ self.wait_for_completion = to_streamed_response_wrapper(
+ indexing_jobs.wait_for_completion,
+ )
+
+
+class AsyncIndexingJobsResourceWithStreamingResponse:
+ def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None:
+ self._indexing_jobs = indexing_jobs
+
+ self.create = async_to_streamed_response_wrapper(
+ indexing_jobs.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ indexing_jobs.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ indexing_jobs.list,
+ )
+ self.retrieve_data_sources = async_to_streamed_response_wrapper(
+ indexing_jobs.retrieve_data_sources,
+ )
+ self.retrieve_signed_url = async_to_streamed_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
+ self.update_cancel = async_to_streamed_response_wrapper(
+ indexing_jobs.update_cancel,
+ )
+ self.wait_for_completion = async_to_streamed_response_wrapper(
+ indexing_jobs.wait_for_completion,
+ )
diff --git a/src/gradient/resources/knowledge_bases/knowledge_bases.py b/src/gradient/resources/knowledge_bases/knowledge_bases.py
new file mode 100644
index 00000000..dd4c3a8f
--- /dev/null
+++ b/src/gradient/resources/knowledge_bases/knowledge_bases.py
@@ -0,0 +1,1120 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import time
+import asyncio
+from typing import Iterable
+
+import httpx
+
+from ...types import (
+ knowledge_base_list_params,
+ knowledge_base_create_params,
+ knowledge_base_update_params,
+)
+from ..._types import (
+ Body,
+ Omit,
+ Query,
+ Headers,
+ NotGiven,
+ SequenceNotStr,
+ omit,
+ not_given,
+)
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .data_sources import (
+ DataSourcesResource,
+ AsyncDataSourcesResource,
+ DataSourcesResourceWithRawResponse,
+ AsyncDataSourcesResourceWithRawResponse,
+ DataSourcesResourceWithStreamingResponse,
+ AsyncDataSourcesResourceWithStreamingResponse,
+)
+from .indexing_jobs import (
+ IndexingJobsResource,
+ AsyncIndexingJobsResource,
+ IndexingJobsResourceWithRawResponse,
+ AsyncIndexingJobsResourceWithRawResponse,
+ IndexingJobsResourceWithStreamingResponse,
+ AsyncIndexingJobsResourceWithStreamingResponse,
+)
+from ..._base_client import make_request_options
+from ...types.knowledge_base_list_response import KnowledgeBaseListResponse
+from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse
+from ...types.knowledge_base_delete_response import KnowledgeBaseDeleteResponse
+from ...types.knowledge_base_update_response import KnowledgeBaseUpdateResponse
+from ...types.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse
+from ...types.knowledge_base_list_indexing_jobs_response import (
+ KnowledgeBaseListIndexingJobsResponse,
+)
+
+__all__ = [
+ "KnowledgeBasesResource",
+ "AsyncKnowledgeBasesResource",
+ "KnowledgeBaseDatabaseError",
+ "KnowledgeBaseTimeoutError",
+]
+
+
+class KnowledgeBaseDatabaseError(Exception):
+    """Signals that a knowledge base's backing database entered a failed state."""
+
+
+class KnowledgeBaseTimeoutError(Exception):
+    """Signals that waiting for a knowledge base database exceeded the timeout."""
+
+
+class KnowledgeBasesResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def data_sources(self) -> DataSourcesResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return DataSourcesResource(self._client)
+
+ @cached_property
+ def indexing_jobs(self) -> IndexingJobsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return IndexingJobsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return KnowledgeBasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return KnowledgeBasesResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        database_id: str | Omit = omit,
+        datasources: Iterable[knowledge_base_create_params.Datasource] | Omit = omit,
+        embedding_model_uuid: str | Omit = omit,
+        name: str | Omit = omit,
+        project_id: str | Omit = omit,
+        region: str | Omit = omit,
+        tags: SequenceNotStr[str] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseCreateResponse:
+        """
+        To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+              optional. If not provided, we create a new database for the knowledge base in
+              the same region as the knowledge base.
+
+          datasources: The data sources to use for this knowledge base. See
+              [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+              for more information on data sources best practices.
+
+          embedding_model_uuid: Identifier for the
+              [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+
+          name: Name of the knowledge base.
+
+          project_id: Identifier of the DigitalOcean project this knowledge base will belong to.
+
+          region: The datacenter region to deploy the knowledge base in.
+
+          tags: Tags to organize your knowledge base.
+
+          vpc_uuid: The VPC to deploy the knowledge base database in
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            # Use the relative path when the client's base URL was overridden
+            # (e.g. for testing or proxying); otherwise target the production host.
+            (
+                "/v2/gen-ai/knowledge_bases"
+                if self._client._base_url_overridden
+                else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+            ),
+            body=maybe_transform(
+                {
+                    "database_id": database_id,
+                    "datasources": datasources,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "region": region,
+                    "tags": tags,
+                    "vpc_uuid": vpc_uuid,
+                },
+                knowledge_base_create_params.KnowledgeBaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseCreateResponse,
+        )
+
+    def retrieve(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        To retrieve information about an existing knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return self._get(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{uuid}"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseRetrieveResponse,
+        )
+
+    def update(
+        self,
+        path_uuid: str,
+        *,
+        database_id: str | Omit = omit,
+        embedding_model_uuid: str | Omit = omit,
+        name: str | Omit = omit,
+        project_id: str | Omit = omit,
+        tags: SequenceNotStr[str] | Omit = omit,
+        body_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseUpdateResponse:
+        """
+        To update a knowledge base, send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          database_id: The id of the DigitalOcean database this knowledge base will use, optional.
+
+          embedding_model_uuid: Identifier for the foundation model.
+
+          name: Knowledge base name
+
+          project_id: The id of the DigitalOcean project this knowledge base will belong to
+
+          tags: Tags to organize your knowledge base.
+
+          body_uuid: Knowledge base id
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+        return self._put(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{path_uuid}"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}"
+            ),
+            body=maybe_transform(
+                {
+                    "database_id": database_id,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "tags": tags,
+                    "body_uuid": body_uuid,
+                },
+                knowledge_base_update_params.KnowledgeBaseUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseUpdateResponse,
+        )
+
+    def list(
+        self,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseListResponse:
+        """
+        To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                "/v2/gen-ai/knowledge_bases"
+                if self._client._base_url_overridden
+                else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                # Pagination parameters travel in the query string, not the body.
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    knowledge_base_list_params.KnowledgeBaseListParams,
+                ),
+            ),
+            cast_to=KnowledgeBaseListResponse,
+        )
+
+    def delete(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseDeleteResponse:
+        """
+        To delete a knowledge base, send a DELETE request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return self._delete(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{uuid}"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseDeleteResponse,
+        )
+
+    def wait_for_database(
+        self,
+        uuid: str,
+        *,
+        timeout: float = 600.0,
+        poll_interval: float = 5.0,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        Poll the knowledge base until the database status is ONLINE or a failed state is reached.
+
+        This helper function repeatedly calls retrieve() to check the database_status field.
+        It will wait for the database to become ONLINE, or raise an exception if it enters
+        a failed state (DECOMMISSIONED or UNHEALTHY) or if the timeout is exceeded.
+
+        Args:
+          uuid: The knowledge base UUID to poll
+
+          timeout: Maximum time to wait in seconds (default: 600 seconds / 10 minutes)
+
+          poll_interval: Time to wait between polls in seconds (default: 5 seconds)
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+        Returns:
+          The final KnowledgeBaseRetrieveResponse when the database status is ONLINE
+
+        Raises:
+          KnowledgeBaseDatabaseError: If the database enters a failed state (DECOMMISSIONED, UNHEALTHY)
+
+          KnowledgeBaseTimeoutError: If the timeout is exceeded before the database becomes ONLINE
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+
+        # time.monotonic() is immune to wall-clock adjustments, which would
+        # otherwise skew the elapsed-time measurement during a long poll.
+        start_time = time.monotonic()
+        failed_states = {"DECOMMISSIONED", "UNHEALTHY"}
+
+        while True:
+            if time.monotonic() - start_time >= timeout:
+                raise KnowledgeBaseTimeoutError(
+                    f"Timeout waiting for knowledge base database to become ready. "
+                    f"Database did not reach ONLINE status within {timeout} seconds."
+                )
+
+            response = self.retrieve(
+                uuid,
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+            )
+
+            status = response.database_status
+
+            if status == "ONLINE":
+                return response
+
+            if status in failed_states:
+                raise KnowledgeBaseDatabaseError(f"Knowledge base database entered failed state: {status}")
+
+            # Recompute the remaining budget *after* the network call: the
+            # original computed it before retrieve(), so a slow request could
+            # make the final sleep overshoot the caller's timeout.
+            remaining_time = timeout - (time.monotonic() - start_time)
+            sleep_time = min(poll_interval, remaining_time)
+            if sleep_time > 0:
+                time.sleep(sleep_time)
+
+    def list_indexing_jobs(
+        self,
+        knowledge_base_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseListIndexingJobsResponse:
+        """
+        To list latest 15 indexing jobs for a knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        return self._get(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseListIndexingJobsResponse,
+        )
+
+
+class AsyncKnowledgeBasesResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def data_sources(self) -> AsyncDataSourcesResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncDataSourcesResource(self._client)
+
+ @cached_property
+ def indexing_jobs(self) -> AsyncIndexingJobsResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncIndexingJobsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncKnowledgeBasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(
+ self,
+ ) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncKnowledgeBasesResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        database_id: str | Omit = omit,
+        datasources: Iterable[knowledge_base_create_params.Datasource] | Omit = omit,
+        embedding_model_uuid: str | Omit = omit,
+        name: str | Omit = omit,
+        project_id: str | Omit = omit,
+        region: str | Omit = omit,
+        tags: SequenceNotStr[str] | Omit = omit,
+        vpc_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseCreateResponse:
+        """
+        To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+              optional. If not provided, we create a new database for the knowledge base in
+              the same region as the knowledge base.
+
+          datasources: The data sources to use for this knowledge base. See
+              [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+              for more information on data sources best practices.
+
+          embedding_model_uuid: Identifier for the
+              [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+
+          name: Name of the knowledge base.
+
+          project_id: Identifier of the DigitalOcean project this knowledge base will belong to.
+
+          region: The datacenter region to deploy the knowledge base in.
+
+          tags: Tags to organize your knowledge base.
+
+          vpc_uuid: The VPC to deploy the knowledge base database in
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            # Use the relative path when the client's base URL was overridden
+            # (e.g. for testing or proxying); otherwise target the production host.
+            (
+                "/v2/gen-ai/knowledge_bases"
+                if self._client._base_url_overridden
+                else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+            ),
+            body=await async_maybe_transform(
+                {
+                    "database_id": database_id,
+                    "datasources": datasources,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "region": region,
+                    "tags": tags,
+                    "vpc_uuid": vpc_uuid,
+                },
+                knowledge_base_create_params.KnowledgeBaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseCreateResponse,
+        )
+
+    async def retrieve(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        To retrieve information about an existing knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return await self._get(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{uuid}"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseRetrieveResponse,
+        )
+
+    async def update(
+        self,
+        path_uuid: str,
+        *,
+        database_id: str | Omit = omit,
+        embedding_model_uuid: str | Omit = omit,
+        name: str | Omit = omit,
+        project_id: str | Omit = omit,
+        tags: SequenceNotStr[str] | Omit = omit,
+        body_uuid: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseUpdateResponse:
+        """
+        To update a knowledge base, send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          database_id: The id of the DigitalOcean database this knowledge base will use, optional.
+
+          embedding_model_uuid: Identifier for the foundation model.
+
+          name: Knowledge base name
+
+          project_id: The id of the DigitalOcean project this knowledge base will belong to
+
+          tags: Tags to organize your knowledge base.
+
+          body_uuid: Knowledge base id
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+        return await self._put(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{path_uuid}"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}"
+            ),
+            body=await async_maybe_transform(
+                {
+                    "database_id": database_id,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "tags": tags,
+                    "body_uuid": body_uuid,
+                },
+                knowledge_base_update_params.KnowledgeBaseUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseUpdateResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        page: int | Omit = omit,
+        per_page: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseListResponse:
+        """
+        To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                "/v2/gen-ai/knowledge_bases"
+                if self._client._base_url_overridden
+                else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                # Pagination parameters travel in the query string, not the body.
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    knowledge_base_list_params.KnowledgeBaseListParams,
+                ),
+            ),
+            cast_to=KnowledgeBaseListResponse,
+        )
+
+    async def delete(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseDeleteResponse:
+        """
+        To delete a knowledge base, send a DELETE request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return await self._delete(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{uuid}"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseDeleteResponse,
+        )
+
+    async def wait_for_database(
+        self,
+        uuid: str,
+        *,
+        timeout: float = 600.0,
+        poll_interval: float = 5.0,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        Poll the knowledge base until the database status is ONLINE or a failed state is reached.
+
+        This helper function repeatedly calls retrieve() to check the database_status field.
+        It will wait for the database to become ONLINE, or raise an exception if it enters
+        a failed state (DECOMMISSIONED or UNHEALTHY) or if the timeout is exceeded.
+
+        Args:
+          uuid: The knowledge base UUID to poll
+
+          timeout: Maximum time to wait in seconds (default: 600 seconds / 10 minutes)
+
+          poll_interval: Time to wait between polls in seconds (default: 5 seconds)
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+        Returns:
+          The final KnowledgeBaseRetrieveResponse when the database status is ONLINE
+
+        Raises:
+          KnowledgeBaseDatabaseError: If the database enters a failed state (DECOMMISSIONED, UNHEALTHY)
+
+          KnowledgeBaseTimeoutError: If the timeout is exceeded before the database becomes ONLINE
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+
+        # time.monotonic() is immune to wall-clock adjustments, which would
+        # otherwise skew the elapsed-time measurement during a long poll.
+        start_time = time.monotonic()
+        failed_states = {"DECOMMISSIONED", "UNHEALTHY"}
+
+        while True:
+            if time.monotonic() - start_time >= timeout:
+                raise KnowledgeBaseTimeoutError(
+                    f"Timeout waiting for knowledge base database to become ready. "
+                    f"Database did not reach ONLINE status within {timeout} seconds."
+                )
+
+            response = await self.retrieve(
+                uuid,
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+            )
+
+            status = response.database_status
+
+            if status == "ONLINE":
+                return response
+
+            if status in failed_states:
+                raise KnowledgeBaseDatabaseError(f"Knowledge base database entered failed state: {status}")
+
+            # Recompute the remaining budget *after* the network call: the
+            # original computed it before retrieve(), so a slow request could
+            # make the final sleep overshoot the caller's timeout.
+            remaining_time = timeout - (time.monotonic() - start_time)
+            sleep_time = min(poll_interval, remaining_time)
+            if sleep_time > 0:
+                # Yield to the event loop while waiting; never block the thread.
+                await asyncio.sleep(sleep_time)
+
+    async def list_indexing_jobs(
+        self,
+        knowledge_base_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseListIndexingJobsResponse:
+        """
+        To list latest 15 indexing jobs for a knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        return await self._get(
+            # Use the relative path when the client's base URL was overridden;
+            # otherwise target the production host.
+            (
+                f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+                if self._client._base_url_overridden
+                else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+            ),
+            cast_to=KnowledgeBaseListIndexingJobsResponse,
+        )
+
+
+class KnowledgeBasesResourceWithRawResponse:
+    """Raw-response variant of ``KnowledgeBasesResource``.
+
+    Each attribute wraps the corresponding resource method with
+    ``to_raw_response_wrapper`` so calls return the raw HTTP response
+    object instead of the parsed content.
+    """
+
+    def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
+        # Keep a handle to the wrapped resource so sub-resources stay reachable.
+        self._knowledge_bases = knowledge_bases
+
+        self.create = to_raw_response_wrapper(
+            knowledge_bases.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            knowledge_bases.retrieve,
+        )
+        self.update = to_raw_response_wrapper(
+            knowledge_bases.update,
+        )
+        self.list = to_raw_response_wrapper(
+            knowledge_bases.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            knowledge_bases.delete,
+        )
+        self.wait_for_database = to_raw_response_wrapper(
+            knowledge_bases.wait_for_database,
+        )
+        self.list_indexing_jobs = to_raw_response_wrapper(
+            knowledge_bases.list_indexing_jobs,
+        )
+
+    @cached_property
+    def data_sources(self) -> DataSourcesResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
+
+    @cached_property
+    def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse:
+        """
+        The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+        """
+        return IndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs)
+
+
+class AsyncKnowledgeBasesResourceWithRawResponse:
+ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
+ self._knowledge_bases = knowledge_bases
+
+ self.create = async_to_raw_response_wrapper(
+ knowledge_bases.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ knowledge_bases.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ knowledge_bases.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ knowledge_bases.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ knowledge_bases.delete,
+ )
+ self.wait_for_database = async_to_raw_response_wrapper(
+ knowledge_bases.wait_for_database,
+ )
+ self.list_indexing_jobs = async_to_raw_response_wrapper(
+ knowledge_bases.list_indexing_jobs,
+ )
+
+ @cached_property
+ def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
+
+ @cached_property
+ def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs)
+
+
+class KnowledgeBasesResourceWithStreamingResponse:
+ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
+ self._knowledge_bases = knowledge_bases
+
+ self.create = to_streamed_response_wrapper(
+ knowledge_bases.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ knowledge_bases.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ knowledge_bases.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ knowledge_bases.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ knowledge_bases.delete,
+ )
+ self.wait_for_database = to_streamed_response_wrapper(
+ knowledge_bases.wait_for_database,
+ )
+ self.list_indexing_jobs = to_streamed_response_wrapper(
+ knowledge_bases.list_indexing_jobs,
+ )
+
+ @cached_property
+ def data_sources(self) -> DataSourcesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
+
+ @cached_property
+ def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
+
+
+class AsyncKnowledgeBasesResourceWithStreamingResponse:
+ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
+ self._knowledge_bases = knowledge_bases
+
+ self.create = async_to_streamed_response_wrapper(
+ knowledge_bases.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ knowledge_bases.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ knowledge_bases.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ knowledge_bases.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ knowledge_bases.delete,
+ )
+ self.wait_for_database = async_to_streamed_response_wrapper(
+ knowledge_bases.wait_for_database,
+ )
+ self.list_indexing_jobs = async_to_streamed_response_wrapper(
+ knowledge_bases.list_indexing_jobs,
+ )
+
+ @cached_property
+ def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
+
+ @cached_property
+ def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
diff --git a/src/gradient/resources/models/__init__.py b/src/gradient/resources/models/__init__.py
new file mode 100644
index 00000000..e30dd201
--- /dev/null
+++ b/src/gradient/resources/models/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .models import (
+ ModelsResource,
+ AsyncModelsResource,
+ ModelsResourceWithRawResponse,
+ AsyncModelsResourceWithRawResponse,
+ ModelsResourceWithStreamingResponse,
+ AsyncModelsResourceWithStreamingResponse,
+)
+from .providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "ProvidersResource",
+ "AsyncProvidersResource",
+ "ProvidersResourceWithRawResponse",
+ "AsyncProvidersResourceWithRawResponse",
+ "ProvidersResourceWithStreamingResponse",
+ "AsyncProvidersResourceWithStreamingResponse",
+ "ModelsResource",
+ "AsyncModelsResource",
+ "ModelsResourceWithRawResponse",
+ "AsyncModelsResourceWithRawResponse",
+ "ModelsResourceWithStreamingResponse",
+ "AsyncModelsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/models/models.py b/src/gradient/resources/models/models.py
new file mode 100644
index 00000000..568fc325
--- /dev/null
+++ b/src/gradient/resources/models/models.py
@@ -0,0 +1,294 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from ...types import model_list_params
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from .providers.providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
+from ...types.model_list_response import ModelListResponse
+
+__all__ = ["ModelsResource", "AsyncModelsResource"]
+
+
+class ModelsResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def providers(self) -> ProvidersResource:
+ return ProvidersResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ModelsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ModelsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ModelsResourceWithStreamingResponse(self)
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ public_only: bool | Omit = omit,
+ usecases: List[
+ Literal[
+ "MODEL_USECASE_UNKNOWN",
+ "MODEL_USECASE_AGENT",
+ "MODEL_USECASE_FINETUNED",
+ "MODEL_USECASE_KNOWLEDGEBASE",
+ "MODEL_USECASE_GUARDRAIL",
+ "MODEL_USECASE_REASONING",
+ "MODEL_USECASE_SERVERLESS",
+ ]
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ModelListResponse:
+ """
+ To list all models, send a GET request to `/v2/gen-ai/models`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ public_only: Only include models that are publicly available.
+
+ usecases: Include only models defined for the listed usecases.
+
+ - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+          - MODEL_USECASE_AGENT: The model may be used in an agent
+          - MODEL_USECASE_FINETUNED: The model may be used for fine tuning
+          - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+            (embedding models)
+          - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+ - MODEL_USECASE_REASONING: The model usecase for reasoning
+ - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/models"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ "public_only": public_only,
+ "usecases": usecases,
+ },
+ model_list_params.ModelListParams,
+ ),
+ ),
+ cast_to=ModelListResponse,
+ )
+
+
+class AsyncModelsResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def providers(self) -> AsyncProvidersResource:
+ return AsyncProvidersResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncModelsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncModelsResourceWithStreamingResponse(self)
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ public_only: bool | Omit = omit,
+ usecases: List[
+ Literal[
+ "MODEL_USECASE_UNKNOWN",
+ "MODEL_USECASE_AGENT",
+ "MODEL_USECASE_FINETUNED",
+ "MODEL_USECASE_KNOWLEDGEBASE",
+ "MODEL_USECASE_GUARDRAIL",
+ "MODEL_USECASE_REASONING",
+ "MODEL_USECASE_SERVERLESS",
+ ]
+ ]
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ModelListResponse:
+ """
+ To list all models, send a GET request to `/v2/gen-ai/models`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ public_only: Only include models that are publicly available.
+
+ usecases: Include only models defined for the listed usecases.
+
+ - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+          - MODEL_USECASE_AGENT: The model may be used in an agent
+          - MODEL_USECASE_FINETUNED: The model may be used for fine tuning
+          - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+            (embedding models)
+          - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+ - MODEL_USECASE_REASONING: The model usecase for reasoning
+ - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/models"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ "public_only": public_only,
+ "usecases": usecases,
+ },
+ model_list_params.ModelListParams,
+ ),
+ ),
+ cast_to=ModelListResponse,
+ )
+
+
+class ModelsResourceWithRawResponse:
+ def __init__(self, models: ModelsResource) -> None:
+ self._models = models
+
+ self.list = to_raw_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> ProvidersResourceWithRawResponse:
+ return ProvidersResourceWithRawResponse(self._models.providers)
+
+
+class AsyncModelsResourceWithRawResponse:
+ def __init__(self, models: AsyncModelsResource) -> None:
+ self._models = models
+
+ self.list = async_to_raw_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> AsyncProvidersResourceWithRawResponse:
+ return AsyncProvidersResourceWithRawResponse(self._models.providers)
+
+
+class ModelsResourceWithStreamingResponse:
+ def __init__(self, models: ModelsResource) -> None:
+ self._models = models
+
+ self.list = to_streamed_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> ProvidersResourceWithStreamingResponse:
+ return ProvidersResourceWithStreamingResponse(self._models.providers)
+
+
+class AsyncModelsResourceWithStreamingResponse:
+ def __init__(self, models: AsyncModelsResource) -> None:
+ self._models = models
+
+ self.list = async_to_streamed_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> AsyncProvidersResourceWithStreamingResponse:
+ return AsyncProvidersResourceWithStreamingResponse(self._models.providers)
diff --git a/src/gradient/resources/models/providers/__init__.py b/src/gradient/resources/models/providers/__init__.py
new file mode 100644
index 00000000..1731e057
--- /dev/null
+++ b/src/gradient/resources/models/providers/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+from .providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
+ "ProvidersResource",
+ "AsyncProvidersResource",
+ "ProvidersResourceWithRawResponse",
+ "AsyncProvidersResourceWithRawResponse",
+ "ProvidersResourceWithStreamingResponse",
+ "AsyncProvidersResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/models/providers/anthropic.py b/src/gradient/resources/models/providers/anthropic.py
new file mode 100644
index 00000000..ce44fe06
--- /dev/null
+++ b/src/gradient/resources/models/providers/anthropic.py
@@ -0,0 +1,719 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.models.providers import (
+ anthropic_list_params,
+ anthropic_create_params,
+ anthropic_update_params,
+ anthropic_list_agents_params,
+)
+from ....types.models.providers.anthropic_list_response import AnthropicListResponse
+from ....types.models.providers.anthropic_create_response import AnthropicCreateResponse
+from ....types.models.providers.anthropic_delete_response import AnthropicDeleteResponse
+from ....types.models.providers.anthropic_update_response import AnthropicUpdateResponse
+from ....types.models.providers.anthropic_retrieve_response import AnthropicRetrieveResponse
+from ....types.models.providers.anthropic_list_agents_response import AnthropicListAgentsResponse
+
+__all__ = ["AnthropicResource", "AsyncAnthropicResource"]
+
+
+class AnthropicResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AnthropicResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicCreateResponse:
+ """
+ To create an Anthropic API key, send a POST request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ api_key: Anthropic API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ anthropic_create_params.AnthropicCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicRetrieveResponse:
+ """
+ To retrieve details of an Anthropic API key, send a GET request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicUpdateResponse:
+ """
+ To update an Anthropic API key, send a PUT request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: Anthropic API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ anthropic_update_params.AnthropicUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicListResponse:
+ """
+ To list all Anthropic API keys, send a GET request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_params.AnthropicListParams,
+ ),
+ ),
+ cast_to=AnthropicListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicDeleteResponse:
+ """
+ To delete an Anthropic API key, send a DELETE request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicDeleteResponse,
+ )
+
+ def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicListAgentsResponse:
+ """
+ List Agents by Anthropic Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_agents_params.AnthropicListAgentsParams,
+ ),
+ ),
+ cast_to=AnthropicListAgentsResponse,
+ )
+
+
+class AsyncAnthropicResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncAnthropicResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicCreateResponse:
+ """
+ To create an Anthropic API key, send a POST request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ api_key: Anthropic API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ anthropic_create_params.AnthropicCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicRetrieveResponse:
+ """
+ To retrieve details of an Anthropic API key, send a GET request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicUpdateResponse:
+ """
+ To update an Anthropic API key, send a PUT request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: Anthropic API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ anthropic_update_params.AnthropicUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicListResponse:
+ """
+ To list all Anthropic API keys, send a GET request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_params.AnthropicListParams,
+ ),
+ ),
+ cast_to=AnthropicListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicDeleteResponse:
+ """
+ To delete an Anthropic API key, send a DELETE request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicDeleteResponse,
+ )
+
+ async def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AnthropicListAgentsResponse:
+ """
+ List Agents by Anthropic Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_agents_params.AnthropicListAgentsParams,
+ ),
+ ),
+ cast_to=AnthropicListAgentsResponse,
+ )
+
+
+class AnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = to_raw_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ anthropic.update,
+ )
+ self.list = to_raw_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = to_raw_response_wrapper(
+ anthropic.list_agents,
+ )
+
+
+class AsyncAnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = async_to_raw_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ anthropic.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = async_to_raw_response_wrapper(
+ anthropic.list_agents,
+ )
+
+
+class AnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = to_streamed_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ anthropic.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = to_streamed_response_wrapper(
+ anthropic.list_agents,
+ )
+
+
+class AsyncAnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = async_to_streamed_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ anthropic.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = async_to_streamed_response_wrapper(
+ anthropic.list_agents,
+ )
diff --git a/src/gradient/resources/models/providers/openai.py b/src/gradient/resources/models/providers/openai.py
new file mode 100644
index 00000000..e048a32f
--- /dev/null
+++ b/src/gradient/resources/models/providers/openai.py
@@ -0,0 +1,715 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.models.providers import (
+ openai_list_params,
+ openai_create_params,
+ openai_update_params,
+ openai_retrieve_agents_params,
+)
+from ....types.models.providers.openai_list_response import OpenAIListResponse
+from ....types.models.providers.openai_create_response import OpenAICreateResponse
+from ....types.models.providers.openai_delete_response import OpenAIDeleteResponse
+from ....types.models.providers.openai_update_response import OpenAIUpdateResponse
+from ....types.models.providers.openai_retrieve_response import OpenAIRetrieveResponse
+from ....types.models.providers.openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse
+
+__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
+
+
+class OpenAIResource(SyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> OpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return OpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return OpenAIResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAICreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ openai_create_params.OpenAICreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAICreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ openai_update_params.OpenAIUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_list_params.OpenAIListParams,
+ ),
+ ),
+ cast_to=OpenAIListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIDeleteResponse,
+ )
+
+ def retrieve_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIRetrieveAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_retrieve_agents_params.OpenAIRetrieveAgentsParams,
+ ),
+ ),
+ cast_to=OpenAIRetrieveAgentsResponse,
+ )
+
+
+class AsyncOpenAIResource(AsyncAPIResource):
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncOpenAIResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAICreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ openai_create_params.OpenAICreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAICreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | Omit = omit,
+ body_api_key_uuid: str | Omit = omit,
+ name: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ openai_update_params.OpenAIUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_list_params.OpenAIListParams,
+ ),
+ ),
+ cast_to=OpenAIListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIDeleteResponse,
+ )
+
+ async def retrieve_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> OpenAIRetrieveAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_retrieve_agents_params.OpenAIRetrieveAgentsParams,
+ ),
+ ),
+ cast_to=OpenAIRetrieveAgentsResponse,
+ )
+
+
+class OpenAIResourceWithRawResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = to_raw_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ openai.update,
+ )
+ self.list = to_raw_response_wrapper(
+ openai.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = to_raw_response_wrapper(
+ openai.retrieve_agents,
+ )
+
+
+class AsyncOpenAIResourceWithRawResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = async_to_raw_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ openai.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ openai.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = async_to_raw_response_wrapper(
+ openai.retrieve_agents,
+ )
+
+
+class OpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = to_streamed_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ openai.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ openai.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = to_streamed_response_wrapper(
+ openai.retrieve_agents,
+ )
+
+
+class AsyncOpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = async_to_streamed_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ openai.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ openai.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = async_to_streamed_response_wrapper(
+ openai.retrieve_agents,
+ )
diff --git a/src/gradient/resources/models/providers/providers.py b/src/gradient/resources/models/providers/providers.py
new file mode 100644
index 00000000..b77bf5af
--- /dev/null
+++ b/src/gradient/resources/models/providers/providers.py
@@ -0,0 +1,170 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["ProvidersResource", "AsyncProvidersResource"]
+
+
+class ProvidersResource(SyncAPIResource):
+ @cached_property
+ def anthropic(self) -> AnthropicResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> OpenAIResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return OpenAIResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ProvidersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ProvidersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ProvidersResourceWithStreamingResponse(self)
+
+
+class AsyncProvidersResource(AsyncAPIResource):
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResource:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncOpenAIResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncProvidersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncProvidersResourceWithStreamingResponse(self)
+
+
+class ProvidersResourceWithRawResponse:
+ def __init__(self, providers: ProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AnthropicResourceWithRawResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return OpenAIResourceWithRawResponse(self._providers.openai)
+
+
+class AsyncProvidersResourceWithRawResponse:
+ def __init__(self, providers: AsyncProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithRawResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncOpenAIResourceWithRawResponse(self._providers.openai)
+
+
+class ProvidersResourceWithStreamingResponse:
+ def __init__(self, providers: ProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AnthropicResourceWithStreamingResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return OpenAIResourceWithStreamingResponse(self._providers.openai)
+
+
+class AsyncProvidersResourceWithStreamingResponse:
+ def __init__(self, providers: AsyncProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ """
+ The API lets you build GPU-powered AI agents with pre-built or custom foundation models, function and agent routes, and RAG pipelines with knowledge bases.
+ """
+ return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai)
diff --git a/src/gradient/resources/nfs/__init__.py b/src/gradient/resources/nfs/__init__.py
new file mode 100644
index 00000000..28f843c0
--- /dev/null
+++ b/src/gradient/resources/nfs/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .nfs import (
+ NfsResource,
+ AsyncNfsResource,
+ NfsResourceWithRawResponse,
+ AsyncNfsResourceWithRawResponse,
+ NfsResourceWithStreamingResponse,
+ AsyncNfsResourceWithStreamingResponse,
+)
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "SnapshotsResource",
+ "AsyncSnapshotsResource",
+ "SnapshotsResourceWithRawResponse",
+ "AsyncSnapshotsResourceWithRawResponse",
+ "SnapshotsResourceWithStreamingResponse",
+ "AsyncSnapshotsResourceWithStreamingResponse",
+ "NfsResource",
+ "AsyncNfsResource",
+ "NfsResourceWithRawResponse",
+ "AsyncNfsResourceWithRawResponse",
+ "NfsResourceWithStreamingResponse",
+ "AsyncNfsResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/nfs/nfs.py b/src/gradient/resources/nfs/nfs.py
new file mode 100644
index 00000000..ec50bdf8
--- /dev/null
+++ b/src/gradient/resources/nfs/nfs.py
@@ -0,0 +1,1088 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ...types import nf_list_params, nf_create_params, nf_delete_params, nf_retrieve_params, nf_initiate_action_params
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.nf_list_response import NfListResponse
+from ...types.nf_create_response import NfCreateResponse
+from ...types.nf_retrieve_response import NfRetrieveResponse
+from ...types.nf_initiate_action_response import NfInitiateActionResponse
+
+__all__ = ["NfsResource", "AsyncNfsResource"]
+
+
+class NfsResource(SyncAPIResource):
+ @cached_property
+ def snapshots(self) -> SnapshotsResource:
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+ return SnapshotsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> NfsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return NfsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> NfsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return NfsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ region: str,
+ size_gib: int,
+ vpc_ids: SequenceNotStr[str],
+ performance_tier: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfCreateResponse:
+ """
+ To create a new NFS share, send a POST request to `/v2/nfs`.
+
+ Args:
+ name: The human-readable name of the share.
+
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ size_gib: The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50.
+
+ vpc_ids: List of VPC IDs that should be able to access the share.
+
+ performance_tier: The performance tier of the share.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ body=maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ "size_gib": size_gib,
+ "vpc_ids": vpc_ids,
+ "performance_tier": performance_tier,
+ },
+ nf_create_params.NfCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NfCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ nfs_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfRetrieveResponse:
+ """
+ To get an NFS share, send a GET request to `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return the NFS share.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ return self._get(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, nf_retrieve_params.NfRetrieveParams),
+ ),
+ cast_to=NfRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfListResponse:
+ """
+ To list NFS shares, send a GET request to `/v2/nfs?region=${region}`.
+
+ A successful request will return all NFS shares belonging to the authenticated
+ user.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, nf_list_params.NfListParams),
+ ),
+ cast_to=NfListResponse,
+ )
+
+ def delete(
+ self,
+ nfs_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS share, send a DELETE request to
+ `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, nf_delete_params.NfDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+    @overload
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        type: Literal["resize", "snapshot", "attach", "detach", "switch_performance_tier"],
+        params: nf_initiate_action_params.NfsActionResizeParams | Omit = omit,
+        region: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                    | Details                                                                                                                      |
+        | ------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+        | `resize`                  | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB                                                    |
+        | `snapshot`                | Takes a snapshot of an NFS share                                                                                              |
+        | `attach`                  | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID                                                |
+        | `detach`                  | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID                                              |
+        | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high)  |
+
+        Args:
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        type: Literal["resize", "snapshot", "attach", "detach", "switch_performance_tier"],
+        params: nf_initiate_action_params.NfsActionSnapshotParams | Omit = omit,
+        region: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                    | Details                                                                                                                      |
+        | ------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+        | `resize`                  | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB                                                    |
+        | `snapshot`                | Takes a snapshot of an NFS share                                                                                              |
+        | `attach`                  | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID                                                |
+        | `detach`                  | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID                                              |
+        | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high)  |
+
+        Args:
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        type: Literal["resize", "snapshot", "attach", "detach", "switch_performance_tier"],
+        params: nf_initiate_action_params.NfsActionAttachParams | Omit = omit,
+        region: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                    | Details                                                                                                                      |
+        | ------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+        | `resize`                  | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB                                                    |
+        | `snapshot`                | Takes a snapshot of an NFS share                                                                                              |
+        | `attach`                  | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID                                                |
+        | `detach`                  | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID                                              |
+        | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high)  |
+
+        Args:
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        type: Literal["resize", "snapshot", "attach", "detach", "switch_performance_tier"],
+        params: nf_initiate_action_params.NfsActionDetachParams | Omit = omit,
+        region: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                    | Details                                                                                                                      |
+        | ------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+        | `resize`                  | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB                                                    |
+        | `snapshot`                | Takes a snapshot of an NFS share                                                                                              |
+        | `attach`                  | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID                                                |
+        | `detach`                  | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID                                              |
+        | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high)  |
+
+        Args:
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        type: Literal["resize", "snapshot", "attach", "detach", "switch_performance_tier"],
+        params: nf_initiate_action_params.NfsActionSwitchPerformanceTierParams | Omit = omit,
+        region: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                    | Details                                                                                                                      |
+        | ------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+        | `resize`                  | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB                                                    |
+        | `snapshot`                | Takes a snapshot of an NFS share                                                                                              |
+        | `attach`                  | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID                                                |
+        | `detach`                  | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID                                              |
+        | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high)  |
+
+        Args:
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["type"])
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        type: Literal["resize", "snapshot", "attach", "detach", "switch_performance_tier"],
+        params: nf_initiate_action_params.NfsActionResizeParams
+        | nf_initiate_action_params.NfsActionSnapshotParams
+        | nf_initiate_action_params.NfsActionAttachParams
+        | nf_initiate_action_params.NfsActionDetachParams
+        | nf_initiate_action_params.NfsActionSwitchPerformanceTierParams
+        | Omit = omit,
+        region: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        if not nfs_id:
+            raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+        return self._post(
+            f"/v2/nfs/{nfs_id}/actions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/nfs/{nfs_id}/actions",
+            body=maybe_transform(
+                {
+                    "type": type,
+                    "params": params,
+                    "region": region,
+                },
+                nf_initiate_action_params.NfInitiateActionParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NfInitiateActionResponse,
+        )
+
+
+class AsyncNfsResource(AsyncAPIResource):
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResource:
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+ return AsyncSnapshotsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncNfsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncNfsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncNfsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncNfsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ region: str,
+ size_gib: int,
+ vpc_ids: SequenceNotStr[str],
+ performance_tier: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfCreateResponse:
+ """
+ To create a new NFS share, send a POST request to `/v2/nfs`.
+
+ Args:
+ name: The human-readable name of the share.
+
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ size_gib: The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50.
+
+ vpc_ids: List of VPC IDs that should be able to access the share.
+
+ performance_tier: The performance tier of the share.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ "size_gib": size_gib,
+ "vpc_ids": vpc_ids,
+ "performance_tier": performance_tier,
+ },
+ nf_create_params.NfCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NfCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ nfs_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfRetrieveResponse:
+ """
+ To get an NFS share, send a GET request to `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return the NFS share.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ return await self._get(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, nf_retrieve_params.NfRetrieveParams),
+ ),
+ cast_to=NfRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfListResponse:
+ """
+ To list NFS shares, send a GET request to `/v2/nfs?region=${region}`.
+
+ A successful request will return all NFS shares belonging to the authenticated
+ user.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, nf_list_params.NfListParams),
+ ),
+ cast_to=NfListResponse,
+ )
+
+ async def delete(
+ self,
+ nfs_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS share, send a DELETE request to
+ `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, nf_delete_params.NfDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionResizeParams | Omit = omit,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high) |
+
+ Args:
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionSnapshotParams | Omit = omit,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high) |
+
+ Args:
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionAttachParams | Omit = omit,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high) |
+
+ Args:
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionDetachParams | Omit = omit,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high) |
+
+ Args:
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionSwitchPerformanceTierParams | Omit = omit,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body to the request, set the
+        `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+ | `attach` | Attaches an NFS share to a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `detach` | Detaches an NFS share from a VPC. Set the vpc_id attribute to the desired VPC ID |
+ | `switch_performance_tier` | Switches the performance tier of an NFS share. Set the performance_tier attribute to the desired tier (e.g., standard, high) |
+
+ Args:
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["type"])
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionResizeParams
+ | nf_initiate_action_params.NfsActionSnapshotParams
+ | nf_initiate_action_params.NfsActionAttachParams
+ | nf_initiate_action_params.NfsActionDetachParams
+ | nf_initiate_action_params.NfsActionSwitchPerformanceTierParams
+ | Omit = omit,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ return await self._post(
+ f"/v2/nfs/{nfs_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}/actions",
+ body=await async_maybe_transform(
+ {
+ "type": type,
+ "params": params,
+ "region": region,
+ },
+ nf_initiate_action_params.NfInitiateActionParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NfInitiateActionResponse,
+ )
+
+
+class NfsResourceWithRawResponse:
+ def __init__(self, nfs: NfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = to_raw_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ nfs.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = to_raw_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResourceWithRawResponse:
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+ return SnapshotsResourceWithRawResponse(self._nfs.snapshots)
+
+
+class AsyncNfsResourceWithRawResponse:
+ def __init__(self, nfs: AsyncNfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = async_to_raw_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ nfs.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = async_to_raw_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse:
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+ return AsyncSnapshotsResourceWithRawResponse(self._nfs.snapshots)
+
+
+class NfsResourceWithStreamingResponse:
+ def __init__(self, nfs: NfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = to_streamed_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ nfs.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = to_streamed_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResourceWithStreamingResponse:
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+ return SnapshotsResourceWithStreamingResponse(self._nfs.snapshots)
+
+
+class AsyncNfsResourceWithStreamingResponse:
+ def __init__(self, nfs: AsyncNfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = async_to_streamed_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ nfs.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = async_to_streamed_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse:
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+ return AsyncSnapshotsResourceWithStreamingResponse(self._nfs.snapshots)
diff --git a/src/gradient/resources/nfs/snapshots.py b/src/gradient/resources/nfs/snapshots.py
new file mode 100644
index 00000000..209e7da9
--- /dev/null
+++ b/src/gradient/resources/nfs/snapshots.py
@@ -0,0 +1,428 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...types.nfs import snapshot_list_params, snapshot_delete_params, snapshot_retrieve_params
+from ..._base_client import make_request_options
+from ...types.nfs.snapshot_list_response import SnapshotListResponse
+from ...types.nfs.snapshot_retrieve_response import SnapshotRetrieveResponse
+
+__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"]
+
+
+class SnapshotsResource(SyncAPIResource):
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> SnapshotsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return SnapshotsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return SnapshotsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotRetrieveResponse:
+ """
+ To get an NFS snapshot, send a GET request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return the NFS snapshot.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ return self._get(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, snapshot_retrieve_params.SnapshotRetrieveParams),
+ ),
+ cast_to=SnapshotRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ region: str | Omit = omit,
+ share_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotListResponse:
+ """
+ To list all NFS snapshots, send a GET request to
+ `/v2/nfs/snapshots?region=${region}&share_id={share_id}`.
+
+ A successful request will return all NFS snapshots belonging to the
+ authenticated user in the specified region.
+
+ Optionally, you can filter snapshots by a specific NFS share by including the
+ `share_id` query parameter.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ share_id: The unique ID of an NFS share. If provided, only snapshots of this specific
+ share will be returned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/nfs/snapshots"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/nfs/snapshots",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "region": region,
+ "share_id": share_id,
+ },
+ snapshot_list_params.SnapshotListParams,
+ ),
+ ),
+ cast_to=SnapshotListResponse,
+ )
+
+ def delete(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS snapshot, send a DELETE request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, snapshot_delete_params.SnapshotDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncSnapshotsResource(AsyncAPIResource):
+ """
+ NFS lets you create fully managed, POSIX-compliant network file storage that delivers secure,
+ high-performance shared storage right inside your VPC. This enables seamless data sharing across Droplets in a VPC.
+ """
+
+ @cached_property
+ def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncSnapshotsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncSnapshotsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotRetrieveResponse:
+ """
+ To get an NFS snapshot, send a GET request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return the NFS snapshot.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ return await self._get(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, snapshot_retrieve_params.SnapshotRetrieveParams),
+ ),
+ cast_to=SnapshotRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ region: str | Omit = omit,
+ share_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotListResponse:
+ """
+ To list all NFS snapshots, send a GET request to
+ `/v2/nfs/snapshots?region=${region}&share_id={share_id}`.
+
+ A successful request will return all NFS snapshots belonging to the
+ authenticated user in the specified region.
+
+ Optionally, you can filter snapshots by a specific NFS share by including the
+ `share_id` query parameter.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ share_id: The unique ID of an NFS share. If provided, only snapshots of this specific
+ share will be returned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/nfs/snapshots"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/nfs/snapshots",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "region": region,
+ "share_id": share_id,
+ },
+ snapshot_list_params.SnapshotListParams,
+ ),
+ ),
+ cast_to=SnapshotListResponse,
+ )
+
+ async def delete(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS snapshot, send a DELETE request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, snapshot_delete_params.SnapshotDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+
+class SnapshotsResourceWithRawResponse:
+ def __init__(self, snapshots: SnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = to_raw_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class AsyncSnapshotsResourceWithRawResponse:
+ def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = async_to_raw_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class SnapshotsResourceWithStreamingResponse:
+ def __init__(self, snapshots: SnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = to_streamed_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class AsyncSnapshotsResourceWithStreamingResponse:
+ def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ snapshots.delete,
+ )
diff --git a/src/gradient/resources/regions.py b/src/gradient/resources/regions.py
new file mode 100644
index 00000000..e89f7c0c
--- /dev/null
+++ b/src/gradient/resources/regions.py
@@ -0,0 +1,201 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import region_list_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.region_list_response import RegionListResponse
+
+__all__ = ["RegionsResource", "AsyncRegionsResource"]
+
+
+class RegionsResource(SyncAPIResource):
+ """Provides information about DigitalOcean data center regions."""
+
+ @cached_property
+ def with_raw_response(self) -> RegionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return RegionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return RegionsResourceWithStreamingResponse(self)
+
+ def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RegionListResponse:
+ """
+ To list all of the regions that are available, send a GET request to
+ `/v2/regions`. The response will be a JSON object with a key called `regions`.
+ The value of this will be an array of `region` objects, each of which will
+ contain the standard region attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ region_list_params.RegionListParams,
+ ),
+ ),
+ cast_to=RegionListResponse,
+ )
+
+
+class AsyncRegionsResource(AsyncAPIResource):
+ """Provides information about DigitalOcean data center regions."""
+
+ @cached_property
+ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRegionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncRegionsResourceWithStreamingResponse(self)
+
+ async def list(
+ self,
+ *,
+ page: int | Omit = omit,
+ per_page: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RegionListResponse:
+ """
+ To list all of the regions that are available, send a GET request to
+ `/v2/regions`. The response will be a JSON object with a key called `regions`.
+ The value of this will be an array of `region` objects, each of which will
+ contain the standard region attributes.
+
+ Args:
+ page: Which 'page' of paginated results to return.
+
+ per_page: Number of items returned per page
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ region_list_params.RegionListParams,
+ ),
+ ),
+ cast_to=RegionListResponse,
+ )
+
+
+class RegionsResourceWithRawResponse:
+ def __init__(self, regions: RegionsResource) -> None:
+ self._regions = regions
+
+ self.list = to_raw_response_wrapper(
+ regions.list,
+ )
+
+
+class AsyncRegionsResourceWithRawResponse:
+ def __init__(self, regions: AsyncRegionsResource) -> None:
+ self._regions = regions
+
+ self.list = async_to_raw_response_wrapper(
+ regions.list,
+ )
+
+
+class RegionsResourceWithStreamingResponse:
+ def __init__(self, regions: RegionsResource) -> None:
+ self._regions = regions
+
+ self.list = to_streamed_response_wrapper(
+ regions.list,
+ )
+
+
+class AsyncRegionsResourceWithStreamingResponse:
+ def __init__(self, regions: AsyncRegionsResource) -> None:
+ self._regions = regions
+
+ self.list = async_to_streamed_response_wrapper(
+ regions.list,
+ )
diff --git a/src/gradient/resources/responses.py b/src/gradient/resources/responses.py
new file mode 100644
index 00000000..936a97a8
--- /dev/null
+++ b/src/gradient/resources/responses.py
@@ -0,0 +1,864 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ..types import response_create_params
+from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from .._utils import required_args, maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._streaming import Stream, AsyncStream
+from .._base_client import make_request_options
+from ..types.shared.create_response_response import CreateResponseResponse
+from ..types.shared.create_response_stream_response import CreateResponseStreamResponse
+
+__all__ = ["ResponsesResource", "AsyncResponsesResource"]
+
+
+class ResponsesResource(SyncAPIResource):
+ """Generate text-to-text responses from text prompts."""
+
+ @cached_property
+ def with_raw_response(self) -> ResponsesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ResponsesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ResponsesResourceWithStreamingResponse(self)
+
+ @overload
+ def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CreateResponseResponse:
+ """Generate text responses from text prompts.
+
+ This endpoint supports both streaming
+ and non-streaming responses for VLLM models only.
+
+ Args:
+ input: The input text prompt or conversation history. Can be a string or an array of
+ message objects for conversation context.
+
+ model: Model ID used to generate the response. Must be a VLLM model.
+
+ instructions: System-level instructions for the model. This sets the behavior and context for
+ the response generation.
+
+ max_output_tokens: Maximum number of tokens to generate in the response. If not specified, the
+ model will use a default value.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion. Alias for
+ max_output_tokens for compatibility.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ modalities: Specifies the output types the model should generate. For text-to-text, this
+ should be ["text"].
+
+ parallel_tool_calls: Whether to enable parallel tool calls. When true, the model can make multiple
+ tool calls in parallel.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool. Uses Responses API format (with `name`, `description`, `parameters` at top
+ level).
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: Literal[True],
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Stream[CreateResponseStreamResponse]:
+ """Generate text responses from text prompts.
+
+ This endpoint supports both streaming
+ and non-streaming responses for VLLM models only.
+
+ Args:
+ input: The input text prompt or conversation history. Can be a string or an array of
+ message objects for conversation context.
+
+ model: Model ID used to generate the response. Must be a VLLM model.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ instructions: System-level instructions for the model. This sets the behavior and context for
+ the response generation.
+
+ max_output_tokens: Maximum number of tokens to generate in the response. If not specified, the
+ model will use a default value.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion. Alias for
+ max_output_tokens for compatibility.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ modalities: Specifies the output types the model should generate. For text-to-text, this
+ should be ["text"].
+
+ parallel_tool_calls: Whether to enable parallel tool calls. When true, the model can make multiple
+ tool calls in parallel.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool. Uses Responses API format (with `name`, `description`, `parameters` at top
+ level).
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: bool,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CreateResponseResponse | Stream[CreateResponseStreamResponse]:
+ """Generate text responses from text prompts.
+
+ This endpoint supports both streaming
+ and non-streaming responses for VLLM models only.
+
+ Args:
+ input: The input text prompt or conversation history. Can be a string or an array of
+ message objects for conversation context.
+
+ model: Model ID used to generate the response. Must be a VLLM model.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ instructions: System-level instructions for the model. This sets the behavior and context for
+ the response generation.
+
+ max_output_tokens: Maximum number of tokens to generate in the response. If not specified, the
+ model will use a default value.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion. Alias for
+ max_output_tokens for compatibility.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ modalities: Specifies the output types the model should generate. For text-to-text, this
+ should be ["text"].
+
+ parallel_tool_calls: Whether to enable parallel tool calls. When true, the model can make multiple
+ tool calls in parallel.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool. Uses Responses API format (with `name`, `description`, `parameters` at top
+ level).
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["input", "model"], ["input", "model", "stream"])
+ def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CreateResponseResponse | Stream[CreateResponseStreamResponse]:
+ return self._post(
+ "/responses" if self._client._base_url_overridden else f"{self._client.inference_endpoint}/v1/responses",
+ body=maybe_transform(
+ {
+ "input": input,
+ "model": model,
+ "instructions": instructions,
+ "max_output_tokens": max_output_tokens,
+ "max_tokens": max_tokens,
+ "metadata": metadata,
+ "modalities": modalities,
+ "parallel_tool_calls": parallel_tool_calls,
+ "stop": stop,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_p": top_p,
+ "user": user,
+ },
+ response_create_params.ResponseCreateParamsStreaming
+ if stream
+ else response_create_params.ResponseCreateParamsNonStreaming,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CreateResponseResponse,
+ stream=stream or False,
+ stream_cls=Stream[CreateResponseStreamResponse],
+ )
+
+
+class AsyncResponsesResource(AsyncAPIResource):
+ """Generate text-to-text responses from text prompts."""
+
+ @cached_property
+ def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncResponsesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncResponsesResourceWithStreamingResponse(self)
+
+ @overload
+ async def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CreateResponseResponse:
+ """Generate text responses from text prompts.
+
+ This endpoint supports both streaming
+ and non-streaming responses for VLLM models only.
+
+ Args:
+ input: The input text prompt or conversation history. Can be a string or an array of
+ message objects for conversation context.
+
+ model: Model ID used to generate the response. Must be a VLLM model.
+
+ instructions: System-level instructions for the model. This sets the behavior and context for
+ the response generation.
+
+ max_output_tokens: Maximum number of tokens to generate in the response. If not specified, the
+ model will use a default value.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion. Alias for
+ max_output_tokens for compatibility.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ modalities: Specifies the output types the model should generate. For text-to-text, this
+ should be ["text"].
+
+ parallel_tool_calls: Whether to enable parallel tool calls. When true, the model can make multiple
+ tool calls in parallel.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool. Uses Responses API format (with `name`, `description`, `parameters` at top
+ level).
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: Literal[True],
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncStream[CreateResponseStreamResponse]:
+ """Generate text responses from text prompts.
+
+ This endpoint supports both streaming
+ and non-streaming responses for VLLM models only.
+
+ Args:
+ input: The input text prompt or conversation history. Can be a string or an array of
+ message objects for conversation context.
+
+ model: Model ID used to generate the response. Must be a VLLM model.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ instructions: System-level instructions for the model. This sets the behavior and context for
+ the response generation.
+
+ max_output_tokens: Maximum number of tokens to generate in the response. If not specified, the
+ model will use a default value.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion. Alias for
+ max_output_tokens for compatibility.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ modalities: Specifies the output types the model should generate. For text-to-text, this
+ should be ["text"].
+
+ parallel_tool_calls: Whether to enable parallel tool calls. When true, the model can make multiple
+ tool calls in parallel.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool. Uses Responses API format (with `name`, `description`, `parameters` at top
+ level).
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: bool,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CreateResponseResponse | AsyncStream[CreateResponseStreamResponse]:
+ """Generate text responses from text prompts.
+
+ This endpoint supports both streaming
+ and non-streaming responses for VLLM models only.
+
+ Args:
+ input: The input text prompt or conversation history. Can be a string or an array of
+ message objects for conversation context.
+
+ model: Model ID used to generate the response. Must be a VLLM model.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+
+ instructions: System-level instructions for the model. This sets the behavior and context for
+ the response generation.
+
+ max_output_tokens: Maximum number of tokens to generate in the response. If not specified, the
+ model will use a default value.
+
+ max_tokens: The maximum number of tokens that can be generated in the completion. Alias for
+ max_output_tokens for compatibility.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+
+ modalities: Specifies the output types the model should generate. For text-to-text, this
+ should be ["text"].
+
+ parallel_tool_calls: Whether to enable parallel tool calls. When true, the model can make multiple
+ tool calls in parallel.
+
+ stop: Up to 4 sequences where the API will stop generating further tokens. The
+ returned text will not contain the stop sequence.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+
+ tools: A list of tools the model may call. Currently, only functions are supported as a
+ tool. Uses Responses API format (with `name`, `description`, `parameters` at top
+ level).
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["input", "model"], ["input", "model", "stream"])
+ async def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ instructions: Optional[str] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tokens: Optional[int] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ modalities: Optional[List[Literal["text"]]] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ user: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CreateResponseResponse | AsyncStream[CreateResponseStreamResponse]:
+ return await self._post(
+ "/responses" if self._client._base_url_overridden else f"{self._client.inference_endpoint}/v1/responses",
+ body=await async_maybe_transform(
+ {
+ "input": input,
+ "model": model,
+ "instructions": instructions,
+ "max_output_tokens": max_output_tokens,
+ "max_tokens": max_tokens,
+ "metadata": metadata,
+ "modalities": modalities,
+ "parallel_tool_calls": parallel_tool_calls,
+ "stop": stop,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_p": top_p,
+ "user": user,
+ },
+ response_create_params.ResponseCreateParamsStreaming
+ if stream
+ else response_create_params.ResponseCreateParamsNonStreaming,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CreateResponseResponse,
+ stream=stream or False,
+ stream_cls=AsyncStream[CreateResponseStreamResponse],
+ )
+
+
+class ResponsesResourceWithRawResponse:
+ def __init__(self, responses: ResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = to_raw_response_wrapper(
+ responses.create,
+ )
+
+
+class AsyncResponsesResourceWithRawResponse:
+ def __init__(self, responses: AsyncResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = async_to_raw_response_wrapper(
+ responses.create,
+ )
+
+
+class ResponsesResourceWithStreamingResponse:
+ def __init__(self, responses: ResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = to_streamed_response_wrapper(
+ responses.create,
+ )
+
+
+class AsyncResponsesResourceWithStreamingResponse:
+ def __init__(self, responses: AsyncResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = async_to_streamed_response_wrapper(
+ responses.create,
+ )
diff --git a/src/gradient/resources/retrieve.py b/src/gradient/resources/retrieve.py
new file mode 100644
index 00000000..992fa37f
--- /dev/null
+++ b/src/gradient/resources/retrieve.py
@@ -0,0 +1,259 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import retrieve_documents_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.retrieve_documents_response import RetrieveDocumentsResponse
+
+__all__ = ["RetrieveResource", "AsyncRetrieveResource"]
+
+
+class RetrieveResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RetrieveResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return RetrieveResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RetrieveResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return RetrieveResourceWithStreamingResponse(self)
+
+ def documents(
+ self,
+ knowledge_base_id: str,
+ *,
+ num_results: int,
+ query: str,
+ alpha: float | Omit = omit,
+ filters: retrieve_documents_params.Filters | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RetrieveDocumentsResponse:
+ """
+ Retrieve relevant documents from a knowledge base using semantic search.
+
+ This endpoint:
+
+ 1. Authenticates the request using the provided bearer token
+ 2. Generates embeddings for the query using the knowledge base's configured
+ model
+ 3. Performs vector similarity search in the knowledge base
+ 4. Returns the most relevant document chunks
+
+ The search supports hybrid search combining:
+
+ - Vector similarity (semantic search)
+ - Keyword matching (BM25)
+ - Custom metadata filters
+
+ Args:
+ num_results: Number of results to return
+
+ query: The search query text
+
+ alpha:
+ Weight for hybrid search (0-1):
+
+ - 0 = pure keyword search (BM25)
+ - 1 = pure vector search (default)
+ - 0.5 = balanced hybrid search
+
+ filters: Metadata filters to apply to the search
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_id:
+ raise ValueError(f"Expected a non-empty value for `knowledge_base_id` but received {knowledge_base_id!r}")
+ return self._post(
+ (
+ f"/{knowledge_base_id}/retrieve"
+ if self._client._base_url_overridden
+ else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve"
+ ),
+ body=maybe_transform(
+ {
+ "num_results": num_results,
+ "query": query,
+ "alpha": alpha,
+ "filters": filters,
+ },
+ retrieve_documents_params.RetrieveDocumentsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ ),
+ cast_to=RetrieveDocumentsResponse,
+ )
+
+
+class AsyncRetrieveResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRetrieveResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRetrieveResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRetrieveResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncRetrieveResourceWithStreamingResponse(self)
+
+ async def documents(
+ self,
+ knowledge_base_id: str,
+ *,
+ num_results: int,
+ query: str,
+ alpha: float | Omit = omit,
+ filters: retrieve_documents_params.Filters | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> RetrieveDocumentsResponse:
+ """
+ Retrieve relevant documents from a knowledge base using semantic search.
+
+ This endpoint:
+
+ 1. Authenticates the request using the provided bearer token
+ 2. Generates embeddings for the query using the knowledge base's configured
+ model
+ 3. Performs vector similarity search in the knowledge base
+ 4. Returns the most relevant document chunks
+
+ The search supports hybrid search combining:
+
+ - Vector similarity (semantic search)
+ - Keyword matching (BM25)
+ - Custom metadata filters
+
+ Args:
+ num_results: Number of results to return
+
+ query: The search query text
+
+ alpha:
+ Weight for hybrid search (0-1):
+
+ - 0 = pure keyword search (BM25)
+ - 1 = pure vector search (default)
+ - 0.5 = balanced hybrid search
+
+ filters: Metadata filters to apply to the search
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_id:
+ raise ValueError(f"Expected a non-empty value for `knowledge_base_id` but received {knowledge_base_id!r}")
+ return await self._post(
+ (
+ f"/{knowledge_base_id}/retrieve"
+ if self._client._base_url_overridden
+ else f"https://kbaas.do-ai.run/v1/{knowledge_base_id}/retrieve"
+ ),
+ body=await async_maybe_transform(
+ {
+ "num_results": num_results,
+ "query": query,
+ "alpha": alpha,
+ "filters": filters,
+ },
+ retrieve_documents_params.RetrieveDocumentsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ ),
+ cast_to=RetrieveDocumentsResponse,
+ )
+
+
+class RetrieveResourceWithRawResponse:
+ def __init__(self, retrieve: RetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = to_raw_response_wrapper(
+ retrieve.documents,
+ )
+
+
+class AsyncRetrieveResourceWithRawResponse:
+ def __init__(self, retrieve: AsyncRetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = async_to_raw_response_wrapper(
+ retrieve.documents,
+ )
+
+
+class RetrieveResourceWithStreamingResponse:
+ def __init__(self, retrieve: RetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = to_streamed_response_wrapper(
+ retrieve.documents,
+ )
+
+
+class AsyncRetrieveResourceWithStreamingResponse:
+ def __init__(self, retrieve: AsyncRetrieveResource) -> None:
+ self._retrieve = retrieve
+
+ self.documents = async_to_streamed_response_wrapper(
+ retrieve.documents,
+ )
diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py
new file mode 100644
index 00000000..fc2907b2
--- /dev/null
+++ b/src/gradient/types/__init__.py
@@ -0,0 +1,237 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from . import (
+ agents,
+ models,
+ api_agent,
+ api_workspace,
+ agent_create_response,
+ agent_delete_response,
+ agent_update_response,
+ agent_retrieve_response,
+ agent_update_status_response,
+)
+from .. import _compat
+from .agents import evaluation_metrics # type: ignore # noqa: F401
+from .models import providers # type: ignore # noqa: F401
+from .shared import (
+ Size as Size,
+ Image as Image,
+ Action as Action,
+ Kernel as Kernel,
+ Region as Region,
+ APIMeta as APIMeta,
+ Droplet as Droplet,
+ GPUInfo as GPUInfo,
+ APILinks as APILinks,
+ DiskInfo as DiskInfo,
+ NetworkV4 as NetworkV4,
+ NetworkV6 as NetworkV6,
+ PageLinks as PageLinks,
+ Snapshots as Snapshots,
+ ActionLink as ActionLink,
+ VpcPeering as VpcPeering,
+ ForwardLinks as ForwardLinks,
+ Subscription as Subscription,
+ BackwardLinks as BackwardLinks,
+ MetaProperties as MetaProperties,
+ CompletionUsage as CompletionUsage,
+ GarbageCollection as GarbageCollection,
+ FirewallRuleTarget as FirewallRuleTarget,
+ ChatCompletionChunk as ChatCompletionChunk,
+ ImageGenStreamEvent as ImageGenStreamEvent,
+ SubscriptionTierBase as SubscriptionTierBase,
+ CreateResponseResponse as CreateResponseResponse,
+ ImageGenCompletedEvent as ImageGenCompletedEvent,
+ DropletNextBackupWindow as DropletNextBackupWindow,
+ ImageGenPartialImageEvent as ImageGenPartialImageEvent,
+ ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,
+ CreateResponseStreamResponse as CreateResponseStreamResponse,
+)
+from .api_agent import APIAgent as APIAgent
+from .api_model import APIModel as APIModel
+from .api_agreement import APIAgreement as APIAgreement
+from .api_workspace import APIWorkspace as APIWorkspace
+from .nf_list_params import NfListParams as NfListParams
+from .api_agent_model import APIAgentModel as APIAgentModel
+from .nf_create_params import NfCreateParams as NfCreateParams
+from .nf_delete_params import NfDeleteParams as NfDeleteParams
+from .nf_list_response import NfListResponse as NfListResponse
+from .agent_list_params import AgentListParams as AgentListParams
+from .api_model_version import APIModelVersion as APIModelVersion
+from .model_list_params import ModelListParams as ModelListParams
+from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
+from .nf_create_response import NfCreateResponse as NfCreateResponse
+from .nf_retrieve_params import NfRetrieveParams as NfRetrieveParams
+from .region_list_params import RegionListParams as RegionListParams
+from .agent_create_params import AgentCreateParams as AgentCreateParams
+from .agent_list_response import AgentListResponse as AgentListResponse
+from .agent_update_params import AgentUpdateParams as AgentUpdateParams
+from .model_list_response import ModelListResponse as ModelListResponse
+from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod
+from .nf_retrieve_response import NfRetrieveResponse as NfRetrieveResponse
+from .region_list_response import RegionListResponse as RegionListResponse
+from .agent_create_response import AgentCreateResponse as AgentCreateResponse
+from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
+from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse
+from .droplet_backup_policy import DropletBackupPolicy as DropletBackupPolicy
+from .image_generate_params import ImageGenerateParams as ImageGenerateParams
+from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
+from .response_create_params import ResponseCreateParams as ResponseCreateParams
+from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
+from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
+from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams
+from .image_generate_response import ImageGenerateResponse as ImageGenerateResponse
+from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
+from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams
+from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse
+from .nf_initiate_action_params import NfInitiateActionParams as NfInitiateActionParams
+from .retrieve_documents_params import RetrieveDocumentsParams as RetrieveDocumentsParams
+from .agent_update_status_params import (
+ AgentUpdateStatusParams as AgentUpdateStatusParams,
+)
+from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo
+from .knowledge_base_list_params import (
+ KnowledgeBaseListParams as KnowledgeBaseListParams,
+)
+from .agent_retrieve_usage_params import AgentRetrieveUsageParams as AgentRetrieveUsageParams
+from .droplet_backup_policy_param import (
+ DropletBackupPolicyParam as DropletBackupPolicyParam,
+)
+from .gpu_droplet_create_response import (
+ GPUDropletCreateResponse as GPUDropletCreateResponse,
+)
+from .nf_initiate_action_response import NfInitiateActionResponse as NfInitiateActionResponse
+from .retrieve_documents_response import RetrieveDocumentsResponse as RetrieveDocumentsResponse
+from .agent_update_status_response import (
+ AgentUpdateStatusResponse as AgentUpdateStatusResponse,
+)
+from .billing_list_insights_params import BillingListInsightsParams as BillingListInsightsParams
+from .knowledge_base_create_params import (
+ KnowledgeBaseCreateParams as KnowledgeBaseCreateParams,
+)
+from .knowledge_base_list_response import (
+ KnowledgeBaseListResponse as KnowledgeBaseListResponse,
+)
+from .knowledge_base_update_params import (
+ KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams,
+)
+from .agent_retrieve_usage_response import AgentRetrieveUsageResponse as AgentRetrieveUsageResponse
+from .gpu_droplet_retrieve_response import (
+ GPUDropletRetrieveResponse as GPUDropletRetrieveResponse,
+)
+from .billing_list_insights_response import (
+ BillingListInsightsResponse as BillingListInsightsResponse,
+)
+from .knowledge_base_create_response import (
+ KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse,
+)
+from .knowledge_base_delete_response import (
+ KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse,
+)
+from .knowledge_base_update_response import (
+ KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse,
+)
+from .gpu_droplet_list_kernels_params import (
+ GPUDropletListKernelsParams as GPUDropletListKernelsParams,
+)
+from .agents.evaluation_metrics.openai import (
+ key_list_agents_response, # type: ignore # noqa: F401
+)
+from .gpu_droplet_delete_by_tag_params import (
+ GPUDropletDeleteByTagParams as GPUDropletDeleteByTagParams,
+)
+from .knowledge_base_retrieve_response import (
+ KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse,
+)
+from .gpu_droplet_list_firewalls_params import (
+ GPUDropletListFirewallsParams as GPUDropletListFirewallsParams,
+)
+from .gpu_droplet_list_kernels_response import (
+ GPUDropletListKernelsResponse as GPUDropletListKernelsResponse,
+)
+from .gpu_droplet_list_snapshots_params import (
+ GPUDropletListSnapshotsParams as GPUDropletListSnapshotsParams,
+)
+from .agents.evaluation_metrics.anthropic import (
+ key_list_response, # type: ignore # noqa: F401
+)
+from .gpu_droplet_list_firewalls_response import (
+ GPUDropletListFirewallsResponse as GPUDropletListFirewallsResponse,
+)
+from .gpu_droplet_list_neighbors_response import (
+ GPUDropletListNeighborsResponse as GPUDropletListNeighborsResponse,
+)
+from .gpu_droplet_list_snapshots_response import (
+ GPUDropletListSnapshotsResponse as GPUDropletListSnapshotsResponse,
+)
+from .agents.evaluation_metrics.workspaces import (
+ agent_list_response, # type: ignore # noqa: F401
+ agent_move_response, # type: ignore # noqa: F401
+)
+from .knowledge_base_list_indexing_jobs_response import (
+ KnowledgeBaseListIndexingJobsResponse as KnowledgeBaseListIndexingJobsResponse,
+)
+
+# Rebuild cyclical models only after all modules are imported.
+# This ensures that, when building the deferred (due to cyclical references) model schema,
+# Pydantic can resolve the necessary references.
+# See: https://github.com/pydantic/pydantic/issues/11250 for more context.
+if _compat.PYDANTIC_V1:
+ api_agent.APIAgent.update_forward_refs() # type: ignore
+ api_workspace.APIWorkspace.update_forward_refs() # type: ignore
+ agent_create_response.AgentCreateResponse.update_forward_refs() # type: ignore
+ agent_retrieve_response.AgentRetrieveResponse.update_forward_refs() # type: ignore
+ agent_update_response.AgentUpdateResponse.update_forward_refs() # type: ignore
+ agent_delete_response.AgentDeleteResponse.update_forward_refs() # type: ignore
+ agent_update_status_response.AgentUpdateStatusResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.workspace_create_response.WorkspaceCreateResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.workspace_retrieve_response.WorkspaceRetrieveResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.workspace_update_response.WorkspaceUpdateResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore
+ agents.function_create_response.FunctionCreateResponse.update_forward_refs() # type: ignore
+ agents.function_update_response.FunctionUpdateResponse.update_forward_refs() # type: ignore
+ agents.function_delete_response.FunctionDeleteResponse.update_forward_refs() # type: ignore
+ agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore
+ agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore
+ agents.route_view_response.RouteViewResponse.update_forward_refs() # type: ignore
+ models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.update_forward_refs() # type: ignore
+ models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.update_forward_refs() # type: ignore
+else:
+ api_agent.APIAgent.model_rebuild(_parent_namespace_depth=0)
+ api_workspace.APIWorkspace.model_rebuild(_parent_namespace_depth=0)
+ agent_create_response.AgentCreateResponse.model_rebuild(_parent_namespace_depth=0)
+ agent_retrieve_response.AgentRetrieveResponse.model_rebuild(_parent_namespace_depth=0)
+ agent_update_response.AgentUpdateResponse.model_rebuild(_parent_namespace_depth=0)
+ agent_delete_response.AgentDeleteResponse.model_rebuild(_parent_namespace_depth=0)
+ agent_update_status_response.AgentUpdateStatusResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.evaluation_metrics.workspace_create_response.WorkspaceCreateResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.evaluation_metrics.workspace_retrieve_response.WorkspaceRetrieveResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
+ agents.evaluation_metrics.workspace_update_response.WorkspaceUpdateResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
+ agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
+ agents.function_create_response.FunctionCreateResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.function_update_response.FunctionUpdateResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.function_delete_response.FunctionDeleteResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.model_rebuild(_parent_namespace_depth=0)
+ agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.model_rebuild(_parent_namespace_depth=0)
+ agents.route_view_response.RouteViewResponse.model_rebuild(_parent_namespace_depth=0)
+ models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.model_rebuild(_parent_namespace_depth=0)
+ models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
diff --git a/src/gradient/types/agent_create_params.py b/src/gradient/types/agent_create_params.py
new file mode 100644
index 00000000..343c5d70
--- /dev/null
+++ b/src/gradient/types/agent_create_params.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from .._types import SequenceNotStr
+from .._utils import PropertyInfo
+
+__all__ = ["AgentCreateParams"]
+
+
+class AgentCreateParams(TypedDict, total=False):
+ anthropic_key_uuid: str
+ """Optional Anthropic API key ID to use with Anthropic models"""
+
+ description: str
+ """A text description of the agent, not used in inference"""
+
+ instruction: str
+ """Agent instruction.
+
+ Instructions help your agent to perform its job effectively. See
+ [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
+ for best practices.
+ """
+
+ knowledge_base_uuid: SequenceNotStr[str]
+ """Ids of the knowledge base(s) to attach to the agent"""
+
+ model_provider_key_uuid: str
+
+ model_uuid: str
+ """Identifier for the foundation model."""
+
+ name: str
+ """Agent name"""
+
+ openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")]
+ """Optional OpenAI API key ID to use with OpenAI models"""
+
+ project_id: str
+ """The id of the DigitalOcean project this agent will belong to"""
+
+ region: str
+ """The DigitalOcean region to deploy your agent in"""
+
+ tags: SequenceNotStr[str]
+ """Agent tag to organize related resources"""
+
+ workspace_uuid: str
+ """Identifier for the workspace"""
diff --git a/src/gradient/types/agent_create_response.py b/src/gradient/types/agent_create_response.py
new file mode 100644
index 00000000..a9138a04
--- /dev/null
+++ b/src/gradient/types/agent_create_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["AgentCreateResponse"]
+
+
+class AgentCreateResponse(BaseModel):
+ """Information about a newly created Agent"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from .api_agent import APIAgent
diff --git a/src/gradient/types/agent_delete_response.py b/src/gradient/types/agent_delete_response.py
new file mode 100644
index 00000000..c16ea9fc
--- /dev/null
+++ b/src/gradient/types/agent_delete_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["AgentDeleteResponse"]
+
+
+class AgentDeleteResponse(BaseModel):
+ """Info about a deleted agent"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from .api_agent import APIAgent
diff --git a/src/gradient/types/agent_list_params.py b/src/gradient/types/agent_list_params.py
new file mode 100644
index 00000000..b56d0395
--- /dev/null
+++ b/src/gradient/types/agent_list_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AgentListParams"]
+
+
+class AgentListParams(TypedDict, total=False):
+ only_deployed: bool
+ """Only list agents that are deployed."""
+
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agent_list_response.py b/src/gradient/types/agent_list_response.py
new file mode 100644
index 00000000..a3b5cf6c
--- /dev/null
+++ b/src/gradient/types/agent_list_response.py
@@ -0,0 +1,288 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .api_agent_model import APIAgentModel
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
+from .api_knowledge_base import APIKnowledgeBase
+from .api_retrieval_method import APIRetrievalMethod
+from .api_deployment_visibility import APIDeploymentVisibility
+
+__all__ = [
+ "AgentListResponse",
+ "Agent",
+ "AgentChatbot",
+ "AgentChatbotIdentifier",
+ "AgentDeployment",
+ "AgentTemplate",
+ "AgentTemplateGuardrail",
+]
+
+
+class AgentChatbot(BaseModel):
+ """A Chatbot"""
+
+ allowed_domains: Optional[List[str]] = None
+
+ button_background_color: Optional[str] = None
+
+ logo: Optional[str] = None
+
+ name: Optional[str] = None
+ """Name of chatbot"""
+
+ primary_color: Optional[str] = None
+
+ secondary_color: Optional[str] = None
+
+ starting_message: Optional[str] = None
+
+
+class AgentChatbotIdentifier(BaseModel):
+ """Agent Chatbot Identifier"""
+
+ agent_chatbot_identifier: Optional[str] = None
+ """Agent chatbot identifier"""
+
+
+class AgentDeployment(BaseModel):
+ """Description of deployment"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ name: Optional[str] = None
+ """Name"""
+
+ status: Optional[
+ Literal[
+ "STATUS_UNKNOWN",
+ "STATUS_WAITING_FOR_DEPLOYMENT",
+ "STATUS_DEPLOYING",
+ "STATUS_RUNNING",
+ "STATUS_FAILED",
+ "STATUS_WAITING_FOR_UNDEPLOYMENT",
+ "STATUS_UNDEPLOYING",
+ "STATUS_UNDEPLOYMENT_FAILED",
+ "STATUS_DELETED",
+ "STATUS_BUILDING",
+ ]
+ ] = None
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ url: Optional[str] = None
+ """Access your deployed agent here"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+ visibility: Optional[APIDeploymentVisibility] = None
+ """
+ - VISIBILITY_UNKNOWN: The status of the deployment is unknown
+ - VISIBILITY_DISABLED: The deployment is disabled and will no longer service
+ requests
+ - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state
+ - VISIBILITY_PUBLIC: The deployment is public and will service requests from the
+ public internet
+ - VISIBILITY_PRIVATE: The deployment is private and will only service requests
+ from other agents, or through API keys
+ """
+
+
+class AgentTemplateGuardrail(BaseModel):
+ priority: Optional[int] = None
+ """Priority of the guardrail"""
+
+ uuid: Optional[str] = None
+ """Uuid of the guardrail"""
+
+
+class AgentTemplate(BaseModel):
+ """Represents an AgentTemplate entity"""
+
+ created_at: Optional[datetime] = None
+ """The agent template's creation date"""
+
+ description: Optional[str] = None
+ """Deprecated - Use summary instead"""
+
+ guardrails: Optional[List[AgentTemplateGuardrail]] = None
+ """List of guardrails associated with the agent template"""
+
+ instruction: Optional[str] = None
+ """Instructions for the agent template"""
+
+ k: Optional[int] = None
+ """The 'k' value for the agent template"""
+
+ knowledge_bases: Optional[List[APIKnowledgeBase]] = None
+ """List of knowledge bases associated with the agent template"""
+
+ long_description: Optional[str] = None
+ """The long description of the agent template"""
+
+ max_tokens: Optional[int] = None
+ """The max_tokens setting for the agent template"""
+
+ model: Optional[APIAgentModel] = None
+ """Description of a Model"""
+
+ name: Optional[str] = None
+ """Name of the agent template"""
+
+ short_description: Optional[str] = None
+ """The short description of the agent template"""
+
+ summary: Optional[str] = None
+ """The summary of the agent template"""
+
+ tags: Optional[List[str]] = None
+ """List of tags associated with the agent template"""
+
+ temperature: Optional[float] = None
+ """The temperature setting for the agent template"""
+
+ template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None
+ """
+ - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template
+ - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template
+ """
+
+ top_p: Optional[float] = None
+ """The top_p setting for the agent template"""
+
+ updated_at: Optional[datetime] = None
+ """The agent template's last updated date"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+
+class Agent(BaseModel):
+ """A GenAI Agent's configuration"""
+
+ chatbot: Optional[AgentChatbot] = None
+ """A Chatbot"""
+
+ chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None
+ """Chatbot identifiers"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ deployment: Optional[AgentDeployment] = None
+ """Description of deployment"""
+
+ description: Optional[str] = None
+ """Description of agent"""
+
+ if_case: Optional[str] = None
+ """Instructions to the agent on how to use the route"""
+
+ instruction: Optional[str] = None
+ """Agent instruction.
+
+ Instructions help your agent to perform its job effectively. See
+ [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
+ for best practices.
+ """
+
+ k: Optional[int] = None
+ """How many results should be considered from an attached knowledge base"""
+
+ max_tokens: Optional[int] = None
+ """
+ Specifies the maximum number of tokens the model can process in a single input
+ or output, set as a number between 1 and 512. This determines the length of each
+ response.
+ """
+
+ model: Optional[APIAgentModel] = None
+ """Description of a Model"""
+
+ name: Optional[str] = None
+ """Agent name"""
+
+ project_id: Optional[str] = None
+ """The DigitalOcean project ID associated with the agent"""
+
+ provide_citations: Optional[bool] = None
+ """Whether the agent should provide in-response citations"""
+
+ region: Optional[str] = None
+ """Region code"""
+
+ retrieval_method: Optional[APIRetrievalMethod] = None
+ """
+ - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown
+ - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite
+ - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back
+ - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries
+ - RETRIEVAL_METHOD_NONE: The retrieval method is none
+ """
+
+ route_created_at: Optional[datetime] = None
+ """Creation of route date / time"""
+
+ route_created_by: Optional[str] = None
+ """Id of user that created the route"""
+
+ route_name: Optional[str] = None
+ """Route name"""
+
+ route_uuid: Optional[str] = None
+ """Route uuid"""
+
+ tags: Optional[List[str]] = None
+    """A set of arbitrary tags to organize your agent"""
+
+ temperature: Optional[float] = None
+ """Controls the model’s creativity, specified as a number between 0 and 1.
+
+ Lower values produce more predictable and conservative responses, while higher
+ values encourage creativity and variation.
+ """
+
+ template: Optional[AgentTemplate] = None
+ """Represents an AgentTemplate entity"""
+
+ top_p: Optional[float] = None
+ """
+ Defines the cumulative probability threshold for word selection, specified as a
+ number between 0 and 1. Higher values allow for more diverse outputs, while
+ lower values ensure focused and coherent responses.
+ """
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ url: Optional[str] = None
+ """Access your agent under this url"""
+
+ user_id: Optional[str] = None
+ """Id of user that created the agent"""
+
+ uuid: Optional[str] = None
+ """Unique agent id"""
+
+ version_hash: Optional[str] = None
+ """The latest version of the agent"""
+
+
+class AgentListResponse(BaseModel):
+ """List of Agents"""
+
+ agents: Optional[List[Agent]] = None
+ """Agents"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/agent_retrieve_response.py b/src/gradient/types/agent_retrieve_response.py
new file mode 100644
index 00000000..c8b25e0b
--- /dev/null
+++ b/src/gradient/types/agent_retrieve_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["AgentRetrieveResponse"]
+
+
+class AgentRetrieveResponse(BaseModel):
+ """One Agent"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from .api_agent import APIAgent
diff --git a/src/gradient/types/agent_retrieve_usage_params.py b/src/gradient/types/agent_retrieve_usage_params.py
new file mode 100644
index 00000000..f5471151
--- /dev/null
+++ b/src/gradient/types/agent_retrieve_usage_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AgentRetrieveUsageParams"]
+
+
+class AgentRetrieveUsageParams(TypedDict, total=False):
+ start: str
+ """Return all usage data from this date."""
+
+ stop: str
+ """
+ Return all usage data up to this date, if omitted, will return up to the current
+ date.
+ """
diff --git a/src/gradient/types/agent_retrieve_usage_response.py b/src/gradient/types/agent_retrieve_usage_response.py
new file mode 100644
index 00000000..f4622ec8
--- /dev/null
+++ b/src/gradient/types/agent_retrieve_usage_response.py
@@ -0,0 +1,58 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from .._models import BaseModel
+
+__all__ = ["AgentRetrieveUsageResponse", "LogInsightsUsage", "LogInsightsUsageMeasurement", "Usage", "UsageMeasurement"]
+
+
+class LogInsightsUsageMeasurement(BaseModel):
+ """Usage Measurement Description"""
+
+ tokens: Optional[int] = None
+
+ usage_type: Optional[str] = None
+
+
+class LogInsightsUsage(BaseModel):
+ """Resource Usage Description"""
+
+ measurements: Optional[List[LogInsightsUsageMeasurement]] = None
+
+ resource_uuid: Optional[str] = None
+
+ start: Optional[datetime] = None
+
+ stop: Optional[datetime] = None
+
+
+class UsageMeasurement(BaseModel):
+ """Usage Measurement Description"""
+
+ tokens: Optional[int] = None
+
+ usage_type: Optional[str] = None
+
+
+class Usage(BaseModel):
+ """Resource Usage Description"""
+
+ measurements: Optional[List[UsageMeasurement]] = None
+
+ resource_uuid: Optional[str] = None
+
+ start: Optional[datetime] = None
+
+ stop: Optional[datetime] = None
+
+
+class AgentRetrieveUsageResponse(BaseModel):
+ """Agent usage"""
+
+ log_insights_usage: Optional[LogInsightsUsage] = None
+ """Resource Usage Description"""
+
+ usage: Optional[Usage] = None
+ """Resource Usage Description"""
diff --git a/src/gradient/types/agent_update_params.py b/src/gradient/types/agent_update_params.py
new file mode 100644
index 00000000..5026beaa
--- /dev/null
+++ b/src/gradient/types/agent_update_params.py
@@ -0,0 +1,94 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from .._types import SequenceNotStr
+from .._utils import PropertyInfo
+from .api_retrieval_method import APIRetrievalMethod
+
+__all__ = ["AgentUpdateParams"]
+
+
+class AgentUpdateParams(TypedDict, total=False):
+ agent_log_insights_enabled: bool
+
+ allowed_domains: SequenceNotStr[str]
+ """
+ Optional list of allowed domains for the chatbot - Must use fully qualified
+ domain name (FQDN) such as https://example.com
+ """
+
+ anthropic_key_uuid: str
+ """Optional anthropic key uuid for use with anthropic models"""
+
+ conversation_logs_enabled: bool
+ """Optional update of conversation logs enabled"""
+
+ description: str
+ """Agent description"""
+
+ instruction: str
+ """Agent instruction.
+
+ Instructions help your agent to perform its job effectively. See
+ [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
+ for best practices.
+ """
+
+ k: int
+ """How many results should be considered from an attached knowledge base"""
+
+ max_tokens: int
+ """
+ Specifies the maximum number of tokens the model can process in a single input
+ or output, set as a number between 1 and 512. This determines the length of each
+ response.
+ """
+
+ model_provider_key_uuid: str
+ """Optional Model Provider uuid for use with provider models"""
+
+ model_uuid: str
+ """Identifier for the foundation model."""
+
+ name: str
+ """Agent name"""
+
+ openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")]
+ """Optional OpenAI key uuid for use with OpenAI models"""
+
+ project_id: str
+ """The id of the DigitalOcean project this agent will belong to"""
+
+ provide_citations: bool
+
+ retrieval_method: APIRetrievalMethod
+ """
+ - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown
+ - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite
+ - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back
+ - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries
+ - RETRIEVAL_METHOD_NONE: The retrieval method is none
+ """
+
+ tags: SequenceNotStr[str]
+    """A set of arbitrary tags to organize your agent"""
+
+ temperature: float
+ """Controls the model’s creativity, specified as a number between 0 and 1.
+
+ Lower values produce more predictable and conservative responses, while higher
+ values encourage creativity and variation.
+ """
+
+ top_p: float
+ """
+ Defines the cumulative probability threshold for word selection, specified as a
+ number between 0 and 1. Higher values allow for more diverse outputs, while
+ lower values ensure focused and coherent responses.
+ """
+
+ body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
+ """Unique agent id"""
diff --git a/src/gradient/types/agent_update_response.py b/src/gradient/types/agent_update_response.py
new file mode 100644
index 00000000..fb232225
--- /dev/null
+++ b/src/gradient/types/agent_update_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["AgentUpdateResponse"]
+
+
+class AgentUpdateResponse(BaseModel):
+ """Information about an updated agent"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from .api_agent import APIAgent
diff --git a/src/gradient/types/agent_update_status_params.py b/src/gradient/types/agent_update_status_params.py
new file mode 100644
index 00000000..3f16fdc2
--- /dev/null
+++ b/src/gradient/types/agent_update_status_params.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from .._utils import PropertyInfo
+from .api_deployment_visibility import APIDeploymentVisibility
+
+__all__ = ["AgentUpdateStatusParams"]
+
+
+class AgentUpdateStatusParams(TypedDict, total=False):
+ body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
+ """Unique id"""
+
+ visibility: APIDeploymentVisibility
+ """
+ - VISIBILITY_UNKNOWN: The status of the deployment is unknown
+ - VISIBILITY_DISABLED: The deployment is disabled and will no longer service
+ requests
+ - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state
+ - VISIBILITY_PUBLIC: The deployment is public and will service requests from the
+ public internet
+ - VISIBILITY_PRIVATE: The deployment is private and will only service requests
+ from other agents, or through API keys
+ """
diff --git a/src/gradient/types/agent_update_status_response.py b/src/gradient/types/agent_update_status_response.py
new file mode 100644
index 00000000..a562915e
--- /dev/null
+++ b/src/gradient/types/agent_update_status_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["AgentUpdateStatusResponse"]
+
+
+class AgentUpdateStatusResponse(BaseModel):
+    """UpdateAgentDeploymentVisibilityOutput description"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from .api_agent import APIAgent
diff --git a/src/gradient/types/agents/__init__.py b/src/gradient/types/agents/__init__.py
new file mode 100644
index 00000000..39b82ebc
--- /dev/null
+++ b/src/gradient/types/agents/__init__.py
@@ -0,0 +1,74 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .api_star_metric import APIStarMetric as APIStarMetric
+from .route_add_params import RouteAddParams as RouteAddParams
+from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun
+from .route_add_response import RouteAddResponse as RouteAddResponse
+from .api_key_list_params import APIKeyListParams as APIKeyListParams
+from .route_update_params import RouteUpdateParams as RouteUpdateParams
+from .route_view_response import RouteViewResponse as RouteViewResponse
+from .version_list_params import VersionListParams as VersionListParams
+from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric
+from .api_evaluation_prompt import APIEvaluationPrompt as APIEvaluationPrompt
+from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams
+from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
+from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams
+from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam
+from .route_delete_response import RouteDeleteResponse as RouteDeleteResponse
+from .route_update_response import RouteUpdateResponse as RouteUpdateResponse
+from .version_list_response import VersionListResponse as VersionListResponse
+from .version_update_params import VersionUpdateParams as VersionUpdateParams
+from .function_create_params import FunctionCreateParams as FunctionCreateParams
+from .function_update_params import FunctionUpdateParams as FunctionUpdateParams
+from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse
+from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse
+from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse
+from .version_update_response import VersionUpdateResponse as VersionUpdateResponse
+from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase
+from .function_create_response import FunctionCreateResponse as FunctionCreateResponse
+from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse
+from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse
+from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse
+from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult
+from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams
+from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput
+from .evaluation_run_create_response import EvaluationRunCreateResponse as EvaluationRunCreateResponse
+from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse
+from .evaluation_metric_list_response import EvaluationMetricListResponse as EvaluationMetricListResponse
+from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams
+from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse
+from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse
+from .evaluation_run_list_results_params import EvaluationRunListResultsParams as EvaluationRunListResultsParams
+from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams
+from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse
+from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams
+from .evaluation_run_list_results_response import EvaluationRunListResultsResponse as EvaluationRunListResultsResponse
+from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse
+from .evaluation_test_case_retrieve_params import EvaluationTestCaseRetrieveParams as EvaluationTestCaseRetrieveParams
+from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse
+from .evaluation_metric_list_regions_params import (
+ EvaluationMetricListRegionsParams as EvaluationMetricListRegionsParams,
+)
+from .evaluation_test_case_retrieve_response import (
+ EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse,
+)
+from .evaluation_metric_list_regions_response import (
+ EvaluationMetricListRegionsResponse as EvaluationMetricListRegionsResponse,
+)
+from .evaluation_run_retrieve_results_response import (
+ EvaluationRunRetrieveResultsResponse as EvaluationRunRetrieveResultsResponse,
+)
+from .evaluation_test_case_list_evaluation_runs_params import (
+ EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams,
+)
+from .evaluation_test_case_list_evaluation_runs_response import (
+ EvaluationTestCaseListEvaluationRunsResponse as EvaluationTestCaseListEvaluationRunsResponse,
+)
+from .evaluation_dataset_create_file_upload_presigned_urls_params import (
+ EvaluationDatasetCreateFileUploadPresignedURLsParams as EvaluationDatasetCreateFileUploadPresignedURLsParams,
+)
+from .evaluation_dataset_create_file_upload_presigned_urls_response import (
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse as EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+)
diff --git a/src/gradient/types/agents/api_evaluation_metric.py b/src/gradient/types/agents/api_evaluation_metric.py
new file mode 100644
index 00000000..84c3ea0a
--- /dev/null
+++ b/src/gradient/types/agents/api_evaluation_metric.py
@@ -0,0 +1,53 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["APIEvaluationMetric"]
+
+
+class APIEvaluationMetric(BaseModel):
+ category: Optional[
+ Literal[
+ "METRIC_CATEGORY_UNSPECIFIED",
+ "METRIC_CATEGORY_CORRECTNESS",
+ "METRIC_CATEGORY_USER_OUTCOMES",
+ "METRIC_CATEGORY_SAFETY_AND_SECURITY",
+ "METRIC_CATEGORY_CONTEXT_QUALITY",
+ "METRIC_CATEGORY_MODEL_FIT",
+ ]
+ ] = None
+
+ description: Optional[str] = None
+
+ inverted: Optional[bool] = None
+ """If true, the metric is inverted, meaning that a lower value is better."""
+
+ is_metric_goal: Optional[bool] = None
+
+ metric_name: Optional[str] = None
+
+ metric_rank: Optional[int] = None
+
+ metric_type: Optional[
+ Literal["METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL"]
+ ] = None
+
+ metric_uuid: Optional[str] = None
+
+ metric_value_type: Optional[
+ Literal[
+ "METRIC_VALUE_TYPE_UNSPECIFIED",
+ "METRIC_VALUE_TYPE_NUMBER",
+ "METRIC_VALUE_TYPE_STRING",
+ "METRIC_VALUE_TYPE_PERCENTAGE",
+ ]
+ ] = None
+
+ range_max: Optional[float] = None
+ """The maximum value for the metric."""
+
+ range_min: Optional[float] = None
+ """The minimum value for the metric."""
diff --git a/src/gradient/types/agents/api_evaluation_metric_result.py b/src/gradient/types/agents/api_evaluation_metric_result.py
new file mode 100644
index 00000000..3d6ea84f
--- /dev/null
+++ b/src/gradient/types/agents/api_evaluation_metric_result.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["APIEvaluationMetricResult"]
+
+
+class APIEvaluationMetricResult(BaseModel):
+ error_description: Optional[str] = None
+ """Error description if the metric could not be calculated."""
+
+ metric_name: Optional[str] = None
+ """Metric name"""
+
+ metric_value_type: Optional[
+ Literal[
+ "METRIC_VALUE_TYPE_UNSPECIFIED",
+ "METRIC_VALUE_TYPE_NUMBER",
+ "METRIC_VALUE_TYPE_STRING",
+ "METRIC_VALUE_TYPE_PERCENTAGE",
+ ]
+ ] = None
+
+ number_value: Optional[float] = None
+ """The value of the metric as a number."""
+
+ reasoning: Optional[str] = None
+ """Reasoning of the metric result."""
+
+ string_value: Optional[str] = None
+ """The value of the metric as a string."""
diff --git a/src/gradient/types/agents/api_evaluation_prompt.py b/src/gradient/types/agents/api_evaluation_prompt.py
new file mode 100644
index 00000000..1bb08bf1
--- /dev/null
+++ b/src/gradient/types/agents/api_evaluation_prompt.py
@@ -0,0 +1,104 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .api_evaluation_metric_result import APIEvaluationMetricResult
+
+__all__ = ["APIEvaluationPrompt", "EvaluationTraceSpan", "EvaluationTraceSpanRetrieverChunk", "PromptChunk"]
+
+
+class EvaluationTraceSpanRetrieverChunk(BaseModel):
+ chunk_usage_pct: Optional[float] = None
+ """The usage percentage of the chunk."""
+
+ chunk_used: Optional[bool] = None
+ """Indicates if the chunk was used in the prompt."""
+
+ index_uuid: Optional[str] = None
+ """The index uuid (Knowledge Base) of the chunk."""
+
+ source_name: Optional[str] = None
+ """The source name for the chunk, e.g., the file name or document title."""
+
+ text: Optional[str] = None
+ """Text content of the chunk."""
+
+
+class EvaluationTraceSpan(BaseModel):
+    """Represents a span within an evaluation trace (e.g., LLM call, tool call, etc.)"""
+
+ created_at: Optional[datetime] = None
+ """When the span was created"""
+
+ input: Optional[object] = None
+ """
+ Input data for the span (flexible structure - can be messages array, string,
+ etc.)
+ """
+
+ name: Optional[str] = None
+ """Name/identifier for the span"""
+
+ output: Optional[object] = None
+ """Output data from the span (flexible structure - can be message, string, etc.)"""
+
+ retriever_chunks: Optional[List[EvaluationTraceSpanRetrieverChunk]] = None
+ """Any retriever span chunks that were included as part of the span."""
+
+ span_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None
+ """The span-level metric results."""
+
+ type: Optional[
+ Literal["TRACE_SPAN_TYPE_UNKNOWN", "TRACE_SPAN_TYPE_LLM", "TRACE_SPAN_TYPE_RETRIEVER", "TRACE_SPAN_TYPE_TOOL"]
+ ] = None
+ """Types of spans in a trace"""
+
+
+class PromptChunk(BaseModel):
+ chunk_usage_pct: Optional[float] = None
+ """The usage percentage of the chunk."""
+
+ chunk_used: Optional[bool] = None
+ """Indicates if the chunk was used in the prompt."""
+
+ index_uuid: Optional[str] = None
+ """The index uuid (Knowledge Base) of the chunk."""
+
+ source_name: Optional[str] = None
+ """The source name for the chunk, e.g., the file name or document title."""
+
+ text: Optional[str] = None
+ """Text content of the chunk."""
+
+
+class APIEvaluationPrompt(BaseModel):
+ evaluation_trace_spans: Optional[List[EvaluationTraceSpan]] = None
+ """The evaluated trace spans."""
+
+ ground_truth: Optional[str] = None
+ """The ground truth for the prompt."""
+
+ input: Optional[str] = None
+
+ input_tokens: Optional[str] = None
+ """The number of input tokens used in the prompt."""
+
+ output: Optional[str] = None
+
+ output_tokens: Optional[str] = None
+ """The number of output tokens used in the prompt."""
+
+ prompt_chunks: Optional[List[PromptChunk]] = None
+ """The list of prompt chunks."""
+
+ prompt_id: Optional[int] = None
+ """Prompt ID"""
+
+ prompt_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None
+ """The metric results for the prompt."""
+
+ trace_id: Optional[str] = None
+ """The trace id for the prompt."""
diff --git a/src/gradient/types/agents/api_evaluation_run.py b/src/gradient/types/agents/api_evaluation_run.py
new file mode 100644
index 00000000..ed4ef0a1
--- /dev/null
+++ b/src/gradient/types/agents/api_evaluation_run.py
@@ -0,0 +1,89 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .api_evaluation_metric_result import APIEvaluationMetricResult
+
+__all__ = ["APIEvaluationRun"]
+
+
+class APIEvaluationRun(BaseModel):
+ agent_deleted: Optional[bool] = None
+ """Whether agent is deleted"""
+
+ agent_deployment_name: Optional[str] = None
+ """The agent deployment name"""
+
+ agent_name: Optional[str] = None
+ """Agent name"""
+
+ agent_uuid: Optional[str] = None
+ """Agent UUID."""
+
+ agent_version_hash: Optional[str] = None
+ """Version hash"""
+
+ agent_workspace_uuid: Optional[str] = None
+ """Agent workspace uuid"""
+
+ created_by_user_email: Optional[str] = None
+
+ created_by_user_id: Optional[str] = None
+
+ error_description: Optional[str] = None
+ """The error description"""
+
+ evaluation_run_uuid: Optional[str] = None
+ """Evaluation run UUID."""
+
+ evaluation_test_case_workspace_uuid: Optional[str] = None
+ """Evaluation test case workspace uuid"""
+
+ finished_at: Optional[datetime] = None
+ """Run end time."""
+
+ pass_status: Optional[bool] = None
+ """The pass status of the evaluation run based on the star metric."""
+
+ queued_at: Optional[datetime] = None
+ """Run queued time."""
+
+ run_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None
+
+ run_name: Optional[str] = None
+ """Run name."""
+
+ star_metric_result: Optional[APIEvaluationMetricResult] = None
+
+ started_at: Optional[datetime] = None
+ """Run start time."""
+
+ status: Optional[
+ Literal[
+ "EVALUATION_RUN_STATUS_UNSPECIFIED",
+ "EVALUATION_RUN_QUEUED",
+ "EVALUATION_RUN_RUNNING_DATASET",
+ "EVALUATION_RUN_EVALUATING_RESULTS",
+ "EVALUATION_RUN_CANCELLING",
+ "EVALUATION_RUN_CANCELLED",
+ "EVALUATION_RUN_SUCCESSFUL",
+ "EVALUATION_RUN_PARTIALLY_SUCCESSFUL",
+ "EVALUATION_RUN_FAILED",
+ ]
+ ] = None
+ """Evaluation Run Statuses"""
+
+ test_case_description: Optional[str] = None
+ """Test case description."""
+
+ test_case_name: Optional[str] = None
+ """Test case name."""
+
+ test_case_uuid: Optional[str] = None
+ """Test-case UUID."""
+
+ test_case_version: Optional[int] = None
+ """Test-case-version."""
diff --git a/src/gradient/types/agents/api_evaluation_test_case.py b/src/gradient/types/agents/api_evaluation_test_case.py
new file mode 100644
index 00000000..dc4c55f0
--- /dev/null
+++ b/src/gradient/types/agents/api_evaluation_test_case.py
@@ -0,0 +1,68 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+from .api_star_metric import APIStarMetric
+from .api_evaluation_metric import APIEvaluationMetric
+
+__all__ = ["APIEvaluationTestCase", "Dataset"]
+
+
+class Dataset(BaseModel):
+ created_at: Optional[datetime] = None
+ """Time created at."""
+
+ dataset_name: Optional[str] = None
+ """Name of the dataset."""
+
+ dataset_uuid: Optional[str] = None
+ """UUID of the dataset."""
+
+ file_size: Optional[str] = None
+ """The size of the dataset uploaded file in bytes."""
+
+ has_ground_truth: Optional[bool] = None
+ """Does the dataset have a ground truth column?"""
+
+ row_count: Optional[int] = None
+ """Number of rows in the dataset."""
+
+
+class APIEvaluationTestCase(BaseModel):
+ archived_at: Optional[datetime] = None
+
+ created_at: Optional[datetime] = None
+
+ created_by_user_email: Optional[str] = None
+
+ created_by_user_id: Optional[str] = None
+
+ dataset: Optional[Dataset] = None
+
+ dataset_name: Optional[str] = None
+
+ dataset_uuid: Optional[str] = None
+
+ description: Optional[str] = None
+
+ latest_version_number_of_runs: Optional[int] = None
+
+ metrics: Optional[List[APIEvaluationMetric]] = None
+
+ name: Optional[str] = None
+
+ star_metric: Optional[APIStarMetric] = None
+
+ test_case_uuid: Optional[str] = None
+
+ total_runs: Optional[int] = None
+
+ updated_at: Optional[datetime] = None
+
+ updated_by_user_email: Optional[str] = None
+
+ updated_by_user_id: Optional[str] = None
+
+ version: Optional[int] = None
diff --git a/src/gradient/types/agents/api_key_create_params.py b/src/gradient/types/agents/api_key_create_params.py
new file mode 100644
index 00000000..184c330c
--- /dev/null
+++ b/src/gradient/types/agents/api_key_create_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["APIKeyCreateParams"]
+
+
+class APIKeyCreateParams(TypedDict, total=False):
+ body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")]
+ """Agent id"""
+
+ name: str
+ """A human friendly name to identify the key"""
diff --git a/src/gradient/types/agents/api_key_create_response.py b/src/gradient/types/agents/api_key_create_response.py
new file mode 100644
index 00000000..ed8906c8
--- /dev/null
+++ b/src/gradient/types/agents/api_key_create_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..api_agent_api_key_info import APIAgentAPIKeyInfo
+
+__all__ = ["APIKeyCreateResponse"]
+
+
+class APIKeyCreateResponse(BaseModel):
+ api_key_info: Optional[APIAgentAPIKeyInfo] = None
+ """Agent API Key Info"""
diff --git a/src/gradient/types/agents/api_key_delete_response.py b/src/gradient/types/agents/api_key_delete_response.py
new file mode 100644
index 00000000..1f38c52e
--- /dev/null
+++ b/src/gradient/types/agents/api_key_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..api_agent_api_key_info import APIAgentAPIKeyInfo
+
+__all__ = ["APIKeyDeleteResponse"]
+
+
+class APIKeyDeleteResponse(BaseModel):
+ api_key_info: Optional[APIAgentAPIKeyInfo] = None
+ """Agent API Key Info"""
diff --git a/src/gradient/types/agents/api_key_list_params.py b/src/gradient/types/agents/api_key_list_params.py
new file mode 100644
index 00000000..1f8f96b7
--- /dev/null
+++ b/src/gradient/types/agents/api_key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APIKeyListParams"]
+
+
+class APIKeyListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/api_key_list_response.py b/src/gradient/types/agents/api_key_list_response.py
new file mode 100644
index 00000000..0040e91c
--- /dev/null
+++ b/src/gradient/types/agents/api_key_list_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
+from ..api_agent_api_key_info import APIAgentAPIKeyInfo
+
+__all__ = ["APIKeyListResponse"]
+
+
+class APIKeyListResponse(BaseModel):
+ api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/agents/api_key_regenerate_response.py b/src/gradient/types/agents/api_key_regenerate_response.py
new file mode 100644
index 00000000..400140fb
--- /dev/null
+++ b/src/gradient/types/agents/api_key_regenerate_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..api_agent_api_key_info import APIAgentAPIKeyInfo
+
+__all__ = ["APIKeyRegenerateResponse"]
+
+
+class APIKeyRegenerateResponse(BaseModel):
+ api_key_info: Optional[APIAgentAPIKeyInfo] = None
+ """Agent API Key Info"""
diff --git a/src/gradient/types/agents/api_key_update_params.py b/src/gradient/types/agents/api_key_update_params.py
new file mode 100644
index 00000000..ba997a2f
--- /dev/null
+++ b/src/gradient/types/agents/api_key_update_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["APIKeyUpdateParams"]
+
+
+class APIKeyUpdateParams(TypedDict, total=False):
+ path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]]
+
+ body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")]
+ """Agent id"""
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name"""
diff --git a/src/gradient/types/agents/api_key_update_response.py b/src/gradient/types/agents/api_key_update_response.py
new file mode 100644
index 00000000..56154b16
--- /dev/null
+++ b/src/gradient/types/agents/api_key_update_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..api_agent_api_key_info import APIAgentAPIKeyInfo
+
+__all__ = ["APIKeyUpdateResponse"]
+
+
+class APIKeyUpdateResponse(BaseModel):
+ api_key_info: Optional[APIAgentAPIKeyInfo] = None
+ """Agent API Key Info"""
diff --git a/src/gradient/types/agents/api_link_knowledge_base_output.py b/src/gradient/types/agents/api_link_knowledge_base_output.py
new file mode 100644
index 00000000..d59f2677
--- /dev/null
+++ b/src/gradient/types/agents/api_link_knowledge_base_output.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APILinkKnowledgeBaseOutput"]
+
+
+class APILinkKnowledgeBaseOutput(BaseModel):
+ """Information about a linked knowledge base"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradient/types/agents/api_star_metric.py b/src/gradient/types/agents/api_star_metric.py
new file mode 100644
index 00000000..0d04dea9
--- /dev/null
+++ b/src/gradient/types/agents/api_star_metric.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APIStarMetric"]
+
+
+class APIStarMetric(BaseModel):
+ metric_uuid: Optional[str] = None
+
+ name: Optional[str] = None
+
+ success_threshold: Optional[float] = None
+ """
+ The success threshold for the star metric. This is a value that the metric must
+ reach to be considered successful.
+ """
+
+ success_threshold_pct: Optional[int] = None
+ """
+ The success threshold for the star metric. This is a percentage value between 0
+ and 100.
+ """
diff --git a/src/gradient/types/agents/api_star_metric_param.py b/src/gradient/types/agents/api_star_metric_param.py
new file mode 100644
index 00000000..781fb2b1
--- /dev/null
+++ b/src/gradient/types/agents/api_star_metric_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APIStarMetricParam"]
+
+
+class APIStarMetricParam(TypedDict, total=False):
+ metric_uuid: str
+
+ name: str
+
+ success_threshold: float
+ """
+ The success threshold for the star metric. This is a value that the metric must
+ reach to be considered successful.
+ """
+
+ success_threshold_pct: int
+ """
+ The success threshold for the star metric. This is a percentage value between 0
+ and 100.
+ """
diff --git a/src/gradient/types/agents/chat/__init__.py b/src/gradient/types/agents/chat/__init__.py
new file mode 100644
index 00000000..9384ac14
--- /dev/null
+++ b/src/gradient/types/agents/chat/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
diff --git a/src/gradient/types/agents/chat/completion_create_params.py b/src/gradient/types/agents/chat/completion_create_params.py
new file mode 100644
index 00000000..f01fa283
--- /dev/null
+++ b/src/gradient/types/agents/chat/completion_create_params.py
@@ -0,0 +1,800 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = [
+ "CompletionCreateParamsBase",
+ "Message",
+ "MessageChatCompletionRequestSystemMessage",
+ "MessageChatCompletionRequestSystemMessageContent",
+ "MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestDeveloperMessage",
+ "MessageChatCompletionRequestDeveloperMessageContent",
+ "MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestUserMessage",
+ "MessageChatCompletionRequestUserMessageContent",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURL",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURLImageURL",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURL",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURLVideoURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURLImageURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURLVideoURL",
+ "MessageChatCompletionRequestAssistantMessage",
+ "MessageChatCompletionRequestAssistantMessageContent",
+ "MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestAssistantMessageToolCall",
+ "MessageChatCompletionRequestAssistantMessageToolCallFunction",
+ "MessageChatCompletionRequestToolMessage",
+ "MessageChatCompletionRequestToolMessageContent",
+ "MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestToolMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "StreamOptions",
+ "ToolChoice",
+ "ToolChoiceChatCompletionNamedToolChoice",
+ "ToolChoiceChatCompletionNamedToolChoiceFunction",
+ "Tool",
+ "ToolFunction",
+ "CompletionCreateParamsNonStreaming",
+ "CompletionCreateParamsStreaming",
+]
+
+
+class CompletionCreateParamsBase(TypedDict, total=False):
+ messages: Required[Iterable[Message]]
+ """A list of messages comprising the conversation so far."""
+
+ model: Required[str]
+ """Model ID used to generate the response."""
+
+ frequency_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on their existing frequency in the
+ text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ """
+
+ logit_bias: Optional[Dict[str, int]]
+ """Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+ """
+
+ logprobs: Optional[bool]
+ """Whether to return log probabilities of the output tokens or not.
+
+ If true, returns the log probabilities of each output token returned in the
+ `content` of `message`.
+ """
+
+ max_completion_tokens: Optional[int]
+ """
+ The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+ """
+
+ max_tokens: Optional[int]
+ """The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+ """
+
+ metadata: Optional[Dict[str, str]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ n: Optional[int]
+ """How many chat completion choices to generate for each input message.
+
+ Note that you will be charged based on the number of generated tokens across all
+ of the choices. Keep `n` as `1` to minimize costs.
+ """
+
+ presence_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on whether they appear in the text so
+ far, increasing the model's likelihood to talk about new topics.
+ """
+
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]]
+ """Constrains effort on reasoning for reasoning models.
+
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response.
+ """
+
+ stop: Union[Optional[str], SequenceNotStr[str], None]
+ """Up to 4 sequences where the API will stop generating further tokens.
+
+ The returned text will not contain the stop sequence.
+ """
+
+ stream_options: Optional[StreamOptions]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ tool_choice: ToolChoice
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+ """
+
+ tools: Iterable[Tool]
+ """A list of tools the model may call.
+
+ Currently, only functions are supported as a tool.
+ """
+
+ top_logprobs: Optional[int]
+ """
+ An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+ """
+
+
+class MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestSystemMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
+ """
+ System-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
+ content: Required[MessageChatCompletionRequestSystemMessageContent]
+ """The contents of the system message."""
+
+ role: Required[Literal["system"]]
+ """The role of the messages author, in this case `system`."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestDeveloperMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
+ """
+ Developer-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
+ content: Required[MessageChatCompletionRequestDeveloperMessageContent]
+ """The contents of the developer message."""
+
+ role: Required[Literal["developer"]]
+ """The role of the messages author, in this case `developer`."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURLImageURL(
+ TypedDict, total=False
+):
+ """Image URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing image content."""
+
+ detail: Literal["auto", "low", "high"]
+ """Optional detail level for image understanding."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURL(TypedDict, total=False):
+ """Content part with type and image URL."""
+
+ image_url: Required[MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURLImageURL]
+ """Image URL settings."""
+
+ type: Required[Literal["image_url"]]
+ """The type of content part"""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURLVideoURL(
+ TypedDict, total=False
+):
+ """Video URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing video content."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURL(TypedDict, total=False):
+ """Content part with type and video URL."""
+
+ type: Required[Literal["video_url"]]
+ """The type of content part"""
+
+ video_url: Required[MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURLVideoURL]
+ """Video URL settings."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: (
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ )
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURLImageURL(
+ TypedDict, total=False
+):
+ """Image URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing image content."""
+
+ detail: Literal["auto", "low", "high"]
+ """Optional detail level for image understanding."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURL(
+ TypedDict, total=False
+):
+ """Content part with type and image URL."""
+
+ image_url: Required[
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURLImageURL
+ ]
+ """Image URL settings."""
+
+ type: Required[Literal["image_url"]]
+ """The type of content part"""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURLVideoURL(
+ TypedDict, total=False
+):
+ """Video URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing video content."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURL(
+ TypedDict, total=False
+):
+ """Content part with type and video URL."""
+
+ type: Required[Literal["video_url"]]
+ """The type of content part"""
+
+ video_url: Required[
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURLVideoURL
+ ]
+ """Video URL settings."""
+
+
+MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartText,
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURL,
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURL,
+]
+
+MessageChatCompletionRequestUserMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartText,
+ MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURL,
+ MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURL,
+ SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
+ """
+ Messages sent by an end user, containing prompts or additional context
+ information.
+ """
+
+ content: Required[MessageChatCompletionRequestUserMessageContent]
+ """The contents of the user message."""
+
+ role: Required[Literal["user"]]
+ """The role of the messages author, in this case `user`."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestAssistantMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False):
+ """The function that the model called."""
+
+ arguments: Required[str]
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the tool call."""
+
+ function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction]
+ """The function that the model called."""
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
+ """Messages sent by the model in response to user messages."""
+
+ role: Required[Literal["assistant"]]
+ """The role of the messages author, in this case `assistant`."""
+
+ content: Optional[MessageChatCompletionRequestAssistantMessageContent]
+ """The contents of the assistant message."""
+
+ tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall]
+ """The tool calls generated by the model, such as function calls."""
+
+
+class MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: (
+ MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ )
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestToolMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestToolMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestToolMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestToolMessage(TypedDict, total=False):
+ content: Required[MessageChatCompletionRequestToolMessageContent]
+ """The contents of the tool message."""
+
+ role: Required[Literal["tool"]]
+ """The role of the messages author, in this case `tool`."""
+
+ tool_call_id: Required[str]
+ """Tool call that this message is responding to."""
+
+
+Message: TypeAlias = Union[
+ MessageChatCompletionRequestSystemMessage,
+ MessageChatCompletionRequestDeveloperMessage,
+ MessageChatCompletionRequestUserMessage,
+ MessageChatCompletionRequestAssistantMessage,
+ MessageChatCompletionRequestToolMessage,
+]
+
+
+class StreamOptions(TypedDict, total=False):
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ include_usage: bool
+ """If set, an additional chunk will be streamed before the `data: [DONE]` message.
+
+ The `usage` field on this chunk shows the token usage statistics for the entire
+ request, and the `choices` field will always be an empty array.
+
+ All other chunks will also include a `usage` field, but with a null value.
+ **NOTE:** If the stream is interrupted, you may not receive the final usage
+ chunk which contains the total token usage for the request.
+ """
+
+
+class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False):
+ """Specifies a tool the model should use.
+
+ Use to force the model to call a specific function.
+ """
+
+ function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction]
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice]
+
+
+class ToolFunction(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: str
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ parameters: Dict[str, object]
+ """The parameters the functions accepts, described as a JSON Schema object.
+
+ See the [guide](/docs/guides/function-calling) for examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+
+class Tool(TypedDict, total=False):
+ function: Required[ToolFunction]
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+ """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+ """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
diff --git a/src/gradient/types/agents/chat/completion_create_response.py b/src/gradient/types/agents/chat/completion_create_response.py
new file mode 100644
index 00000000..88c64763
--- /dev/null
+++ b/src/gradient/types/agents/chat/completion_create_response.py
@@ -0,0 +1,118 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.completion_usage import CompletionUsage
+from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+ "CompletionCreateResponse",
+ "Choice",
+ "ChoiceLogprobs",
+ "ChoiceMessage",
+ "ChoiceMessageToolCall",
+ "ChoiceMessageToolCallFunction",
+]
+
+
+class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice."""
+
+ content: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message content tokens with log probability information."""
+
+ refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message refusal tokens with log probability information."""
+
+
+class ChoiceMessageToolCallFunction(BaseModel):
+ """The function that the model called."""
+
+ arguments: str
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: str
+ """The name of the function to call."""
+
+
+class ChoiceMessageToolCall(BaseModel):
+ id: str
+ """The ID of the tool call."""
+
+ function: ChoiceMessageToolCallFunction
+ """The function that the model called."""
+
+ type: Literal["function"]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceMessage(BaseModel):
+ """A chat completion message generated by the model."""
+
+ content: Optional[str] = None
+ """The contents of the message."""
+
+ reasoning_content: Optional[str] = None
+ """The reasoning content generated by the model."""
+
+ refusal: Optional[str] = None
+ """The refusal message generated by the model."""
+
+ role: Literal["assistant"]
+ """The role of the author of this message."""
+
+ tool_calls: Optional[List[ChoiceMessageToolCall]] = None
+ """The tool calls generated by the model, such as function calls."""
+
+
+class Choice(BaseModel):
+ finish_reason: Literal["stop", "length", "tool_calls", "content_filter"]
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, or `length` if the maximum number of tokens specified in the request
+    was reached, `tool_calls` if the model called a tool, or `content_filter` if content was filtered.
+ """
+
+ index: int
+ """The index of the choice in the list of choices."""
+
+ logprobs: Optional[ChoiceLogprobs] = None
+ """Log probability information for the choice."""
+
+ message: ChoiceMessage
+ """A chat completion message generated by the model."""
+
+
+class CompletionCreateResponse(BaseModel):
+ """
+    Represents a chat completion response returned by the model, based on the provided input.
+ """
+
+ id: str
+ """A unique identifier for the chat completion."""
+
+ choices: List[Choice]
+ """A list of chat completion choices.
+
+ Can be more than one if `n` is greater than 1.
+ """
+
+ created: int
+ """The Unix timestamp (in seconds) of when the chat completion was created."""
+
+ model: str
+ """The model used for the chat completion."""
+
+ object: Literal["chat.completion"]
+ """The object type, which is always `chat.completion`."""
+
+ usage: Optional[CompletionUsage] = None
+ """Usage statistics for the completion request."""
diff --git a/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
new file mode 100644
index 00000000..9412b46c
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+__all__ = ["EvaluationDatasetCreateFileUploadPresignedURLsParams", "File"]
+
+
+class EvaluationDatasetCreateFileUploadPresignedURLsParams(TypedDict, total=False):
+ files: Iterable[File]
+ """A list of files to generate presigned URLs for."""
+
+
+class File(TypedDict, total=False):
+    """A single file's metadata in the request."""
+
+ file_name: str
+ """Local filename"""
+
+ file_size: str
+ """The size of the file in bytes."""
diff --git a/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
new file mode 100644
index 00000000..3648a9ed
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["EvaluationDatasetCreateFileUploadPresignedURLsResponse", "Upload"]
+
+
+class Upload(BaseModel):
+ """Detailed info about each presigned URL returned to the client."""
+
+ expires_at: Optional[datetime] = None
+ """The time the url expires at."""
+
+ object_key: Optional[str] = None
+ """The unique object key to store the file as."""
+
+ original_file_name: Optional[str] = None
+ """The original file name."""
+
+ presigned_url: Optional[str] = None
+ """The actual presigned URL the client can use to upload the file directly."""
+
+
+class EvaluationDatasetCreateFileUploadPresignedURLsResponse(BaseModel):
+ """Response with pre-signed urls to upload files."""
+
+ request_id: Optional[str] = None
+ """The ID generated for the request for Presigned URLs."""
+
+ uploads: Optional[List[Upload]] = None
+ """A list of generated presigned URLs and object keys, one per file."""
diff --git a/src/gradient/types/agents/evaluation_dataset_create_params.py b/src/gradient/types/agents/evaluation_dataset_create_params.py
new file mode 100644
index 00000000..661a42da
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_dataset_create_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+from ..knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam
+
+__all__ = ["EvaluationDatasetCreateParams"]
+
+
+class EvaluationDatasetCreateParams(TypedDict, total=False):
+ dataset_type: Literal[
+ "EVALUATION_DATASET_TYPE_UNKNOWN", "EVALUATION_DATASET_TYPE_ADK", "EVALUATION_DATASET_TYPE_NON_ADK"
+ ]
+
+ file_upload_dataset: APIFileUploadDataSourceParam
+ """File to upload as data source for knowledge base."""
+
+ name: str
+ """The name of the agent evaluation dataset."""
diff --git a/src/gradient/types/agents/evaluation_dataset_create_response.py b/src/gradient/types/agents/evaluation_dataset_create_response.py
new file mode 100644
index 00000000..4e5f8c9b
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_dataset_create_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["EvaluationDatasetCreateResponse"]
+
+
+class EvaluationDatasetCreateResponse(BaseModel):
+ """Output for creating an agent evaluation dataset"""
+
+ evaluation_dataset_uuid: Optional[str] = None
+ """Evaluation dataset uuid."""
diff --git a/src/gradient/types/agents/evaluation_metric_list_regions_params.py b/src/gradient/types/agents/evaluation_metric_list_regions_params.py
new file mode 100644
index 00000000..701e7d4e
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metric_list_regions_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["EvaluationMetricListRegionsParams"]
+
+
+class EvaluationMetricListRegionsParams(TypedDict, total=False):
+ serves_batch: bool
+ """Include datacenters that are capable of running batch jobs."""
+
+ serves_inference: bool
+ """Include datacenters that serve inference."""
diff --git a/src/gradient/types/agents/evaluation_metric_list_regions_response.py b/src/gradient/types/agents/evaluation_metric_list_regions_response.py
new file mode 100644
index 00000000..dc07a7ef
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metric_list_regions_response.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["EvaluationMetricListRegionsResponse", "Region"]
+
+
+class Region(BaseModel):
+ """Description for a specific Region"""
+
+ inference_url: Optional[str] = None
+ """Url for inference server"""
+
+ region: Optional[str] = None
+ """Region code"""
+
+ serves_batch: Optional[bool] = None
+ """This datacenter is capable of running batch jobs"""
+
+ serves_inference: Optional[bool] = None
+ """This datacenter is capable of serving inference"""
+
+ stream_inference_url: Optional[str] = None
+ """The url for the inference streaming server"""
+
+
+class EvaluationMetricListRegionsResponse(BaseModel):
+ """Region Codes"""
+
+ regions: Optional[List[Region]] = None
+ """Region code"""
diff --git a/src/gradient/types/agents/evaluation_metric_list_response.py b/src/gradient/types/agents/evaluation_metric_list_response.py
new file mode 100644
index 00000000..0708f1ba
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metric_list_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .api_evaluation_metric import APIEvaluationMetric
+
+__all__ = ["EvaluationMetricListResponse"]
+
+
+class EvaluationMetricListResponse(BaseModel):
+ metrics: Optional[List[APIEvaluationMetric]] = None
diff --git a/src/gradient/types/agents/evaluation_metrics/__init__.py b/src/gradient/types/agents/evaluation_metrics/__init__.py
new file mode 100644
index 00000000..971eddef
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/__init__.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from . import (
+ openai, # type: ignore # noqa: F401
+ anthropic, # type: ignore # noqa: F401
+ workspaces, # type: ignore # noqa: F401
+)
+from .workspace_create_params import WorkspaceCreateParams as WorkspaceCreateParams
+from .workspace_list_response import WorkspaceListResponse as WorkspaceListResponse
+from .workspace_update_params import WorkspaceUpdateParams as WorkspaceUpdateParams
+from .workspace_create_response import WorkspaceCreateResponse as WorkspaceCreateResponse
+from .workspace_delete_response import WorkspaceDeleteResponse as WorkspaceDeleteResponse
+from .workspace_update_response import WorkspaceUpdateResponse as WorkspaceUpdateResponse
+from .oauth2_generate_url_params import Oauth2GenerateURLParams as Oauth2GenerateURLParams
+from .workspace_retrieve_response import WorkspaceRetrieveResponse as WorkspaceRetrieveResponse
+from .oauth2_generate_url_response import Oauth2GenerateURLResponse as Oauth2GenerateURLResponse
+from .scheduled_indexing_create_params import ScheduledIndexingCreateParams as ScheduledIndexingCreateParams
+from .scheduled_indexing_create_response import ScheduledIndexingCreateResponse as ScheduledIndexingCreateResponse
+from .scheduled_indexing_delete_response import ScheduledIndexingDeleteResponse as ScheduledIndexingDeleteResponse
+from .scheduled_indexing_retrieve_response import ScheduledIndexingRetrieveResponse as ScheduledIndexingRetrieveResponse
+from .workspace_list_evaluation_test_cases_response import (
+ WorkspaceListEvaluationTestCasesResponse as WorkspaceListEvaluationTestCasesResponse,
+)
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/__init__.py b/src/gradient/types/agents/evaluation_metrics/anthropic/__init__.py
new file mode 100644
index 00000000..eb47e709
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams
+from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_params.py
new file mode 100644
index 00000000..55f44139
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+ api_key: str
+ """Anthropic API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py
new file mode 100644
index 00000000..34babe47
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+ """
+ CreateAnthropicAPIKeyOutput is used to return the newly created Anthropic API key.
+ """
+
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py
new file mode 100644
index 00000000..c2796b36
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+ """DeleteAnthropicAPIKeyOutput is used to return the deleted Anthropic API key."""
+
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
new file mode 100644
index 00000000..566c39f7
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListAgentsParams"]
+
+
+class KeyListAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
new file mode 100644
index 00000000..34ab7508
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+
+__all__ = ["KeyListAgentsResponse"]
+
+
+class KeyListAgentsResponse(BaseModel):
+ """List of Agents that linked to a specific Anthropic Key"""
+
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+
+from ....api_agent import APIAgent
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_params.py
new file mode 100644
index 00000000..1611dc03
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py
new file mode 100644
index 00000000..21729e57
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+ """
+ ListAnthropicAPIKeysOutput is used to return the list of Anthropic API keys for a specific agent.
+ """
+
+ api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
new file mode 100644
index 00000000..a100ec29
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_params.py
new file mode 100644
index 00000000..0d542bbb
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ....._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+ api_key: str
+ """Anthropic API key"""
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py
new file mode 100644
index 00000000..04d20e9b
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+ """UpdateAnthropicAPIKeyOutput is used to return the updated Anthropic API key."""
+
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2/__init__.py b/src/gradient/types/agents/evaluation_metrics/oauth2/__init__.py
new file mode 100644
index 00000000..e686ce35
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/oauth2/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .dropbox_create_tokens_params import DropboxCreateTokensParams as DropboxCreateTokensParams
+from .dropbox_create_tokens_response import DropboxCreateTokensResponse as DropboxCreateTokensResponse
diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_params.py b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_params.py
new file mode 100644
index 00000000..00d22cce
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["DropboxCreateTokensParams"]
+
+
+class DropboxCreateTokensParams(TypedDict, total=False):
+ code: str
+    """The oauth2 code from dropbox"""
+
+ redirect_url: str
+ """Redirect url"""
diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py
new file mode 100644
index 00000000..6277059b
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+
+__all__ = ["DropboxCreateTokensResponse"]
+
+
+class DropboxCreateTokensResponse(BaseModel):
+ """The dropbox oauth2 token and refresh token"""
+
+ token: Optional[str] = None
+ """The access token"""
+
+ refresh_token: Optional[str] = None
+ """The refresh token"""
diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_params.py b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_params.py
new file mode 100644
index 00000000..68924774
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["Oauth2GenerateURLParams"]
+
+
+class Oauth2GenerateURLParams(TypedDict, total=False):
+ redirect_url: str
+ """The redirect url."""
+
+ type: str
+ """Type "google" / "dropbox"."""
diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py
new file mode 100644
index 00000000..f1e782c4
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["Oauth2GenerateURLResponse"]
+
+
+class Oauth2GenerateURLResponse(BaseModel):
+ """The url for the oauth2 flow"""
+
+ url: Optional[str] = None
+ """The oauth2 url"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/__init__.py b/src/gradient/types/agents/evaluation_metrics/openai/__init__.py
new file mode 100644
index 00000000..eb47e709
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams
+from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_create_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_create_params.py
new file mode 100644
index 00000000..5f4975dd
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+ api_key: str
+ """OpenAI API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py
new file mode 100644
index 00000000..f6254e1c
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+ """CreateOpenAIAPIKeyOutput is used to return the newly created OpenAI API key."""
+
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py
new file mode 100644
index 00000000..1ac937f4
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+ """DeleteOpenAIAPIKeyOutput is used to return the deleted OpenAI API key."""
+
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_params.py
new file mode 100644
index 00000000..566c39f7
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListAgentsParams"]
+
+
+class KeyListAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py
new file mode 100644
index 00000000..fa2ba7cc
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+
+__all__ = ["KeyListAgentsResponse"]
+
+
+class KeyListAgentsResponse(BaseModel):
+ """List of Agents that are linked to a specific OpenAI Key"""
+
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+
+from ....api_agent import APIAgent
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_list_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_params.py
new file mode 100644
index 00000000..1611dc03
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py
new file mode 100644
index 00000000..f335cfc9
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+ """
+ ListOpenAIAPIKeysOutput is used to return the list of OpenAI API keys for a specific agent.
+ """
+
+ api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_retrieve_response.py
new file mode 100644
index 00000000..9ba42cd2
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_update_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_update_params.py
new file mode 100644
index 00000000..3960cf36
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ....._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+ api_key: str
+ """OpenAI API key"""
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py
new file mode 100644
index 00000000..28b56926
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+ """UpdateOpenAIAPIKeyOutput is used to return the updated OpenAI API key."""
+
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py
new file mode 100644
index 00000000..209766b4
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+__all__ = ["ScheduledIndexingCreateParams"]
+
+
+class ScheduledIndexingCreateParams(TypedDict, total=False):
+ days: Iterable[int]
+ """Days for execution (day is represented same as in a cron expression, e.g.
+
+ Monday begins with 1 )
+ """
+
+ knowledge_base_uuid: str
+ """Knowledge base uuid for which the schedule is created"""
+
+ time: str
+ """Time of execution (HH:MM) UTC"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
new file mode 100644
index 00000000..78cb1e73
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
@@ -0,0 +1,50 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingCreateResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+ """Metadata for scheduled indexing entries"""
+
+ created_at: Optional[datetime] = None
+ """Created at timestamp"""
+
+ days: Optional[List[int]] = None
+ """Days for execution (day is represented same as in a cron expression, e.g.
+
+ Monday begins with 1 )
+ """
+
+ deleted_at: Optional[datetime] = None
+ """Deleted at timestamp (if soft deleted)"""
+
+ is_active: Optional[bool] = None
+ """Whether the schedule is currently active"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base uuid associated with this schedule"""
+
+ last_ran_at: Optional[datetime] = None
+ """Last time the schedule was executed"""
+
+ next_run_at: Optional[datetime] = None
+ """Next scheduled run"""
+
+ time: Optional[str] = None
+ """Scheduled time of execution (HH:MM:SS format)"""
+
+ updated_at: Optional[datetime] = None
+ """Updated at timestamp"""
+
+ uuid: Optional[str] = None
+ """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingCreateResponse(BaseModel):
+ indexing_info: Optional[IndexingInfo] = None
+ """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
new file mode 100644
index 00000000..b359cb18
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
@@ -0,0 +1,50 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingDeleteResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+ """Metadata for scheduled indexing entries"""
+
+ created_at: Optional[datetime] = None
+ """Created at timestamp"""
+
+ days: Optional[List[int]] = None
+ """Days for execution (day is represented same as in a cron expression, e.g.
+
+ Monday begins with 1 )
+ """
+
+ deleted_at: Optional[datetime] = None
+ """Deleted at timestamp (if soft deleted)"""
+
+ is_active: Optional[bool] = None
+ """Whether the schedule is currently active"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base uuid associated with this schedule"""
+
+ last_ran_at: Optional[datetime] = None
+ """Last time the schedule was executed"""
+
+ next_run_at: Optional[datetime] = None
+ """Next scheduled run"""
+
+ time: Optional[str] = None
+ """Scheduled time of execution (HH:MM:SS format)"""
+
+ updated_at: Optional[datetime] = None
+ """Updated at timestamp"""
+
+ uuid: Optional[str] = None
+ """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingDeleteResponse(BaseModel):
+ indexing_info: Optional[IndexingInfo] = None
+ """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
new file mode 100644
index 00000000..4d3840a3
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
@@ -0,0 +1,50 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingRetrieveResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+ """Metadata for scheduled indexing entries"""
+
+ created_at: Optional[datetime] = None
+ """Created at timestamp"""
+
+ days: Optional[List[int]] = None
+ """Days for execution (day is represented same as in a cron expression, e.g.
+
+ Monday begins with 1 )
+ """
+
+ deleted_at: Optional[datetime] = None
+ """Deleted at timestamp (if soft deleted)"""
+
+ is_active: Optional[bool] = None
+ """Whether the schedule is currently active"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base uuid associated with this schedule"""
+
+ last_ran_at: Optional[datetime] = None
+ """Last time the schedule was executed"""
+
+ next_run_at: Optional[datetime] = None
+ """Next scheduled run"""
+
+ time: Optional[str] = None
+ """Scheduled time of execution (HH:MM:SS format)"""
+
+ updated_at: Optional[datetime] = None
+ """Updated at timestamp"""
+
+ uuid: Optional[str] = None
+ """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingRetrieveResponse(BaseModel):
+ indexing_info: Optional[IndexingInfo] = None
+ """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py
new file mode 100644
index 00000000..443a6f43
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["WorkspaceCreateParams"]
+
+
+class WorkspaceCreateParams(TypedDict, total=False):
+ agent_uuids: SequenceNotStr[str]
+ """Ids of the agents(s) to attach to the workspace"""
+
+ description: str
+ """Description of the workspace"""
+
+ name: str
+ """Name of the workspace"""
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_create_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_create_response.py
new file mode 100644
index 00000000..419ec288
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_create_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceCreateResponse"]
+
+
+class WorkspaceCreateResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_delete_response.py
new file mode 100644
index 00000000..3e094515
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_delete_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceDeleteResponse"]
+
+
+class WorkspaceDeleteResponse(BaseModel):
+ workspace_uuid: Optional[str] = None
+ """Workspace"""
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
new file mode 100644
index 00000000..32c613f8
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ..api_evaluation_test_case import APIEvaluationTestCase
+
+__all__ = ["WorkspaceListEvaluationTestCasesResponse"]
+
+
+class WorkspaceListEvaluationTestCasesResponse(BaseModel):
+ evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_list_response.py
new file mode 100644
index 00000000..793623dd
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_list_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceListResponse"]
+
+
+class WorkspaceListResponse(BaseModel):
+ workspaces: Optional[List["APIWorkspace"]] = None
+ """Workspaces"""
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_retrieve_response.py
new file mode 100644
index 00000000..fa4a567c
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_retrieve_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceRetrieveResponse"]
+
+
+class WorkspaceRetrieveResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradient/types/agents/evaluation_metrics/workspace_update_params.py
new file mode 100644
index 00000000..d5906bd9
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["WorkspaceUpdateParams"]
+
+
+class WorkspaceUpdateParams(TypedDict, total=False):
+ description: str
+ """The new description of the workspace"""
+
+ name: str
+ """The new name of the workspace"""
+
+ body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")]
+ """Workspace UUID."""
diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_update_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_update_response.py
new file mode 100644
index 00000000..77dac88c
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspace_update_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceUpdateResponse"]
+
+
+class WorkspaceUpdateResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/gradient/types/agents/evaluation_metrics/workspaces/__init__.py b/src/gradient/types/agents/evaluation_metrics/workspaces/__init__.py
new file mode 100644
index 00000000..9f369c7c
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspaces/__init__.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .agent_list_params import AgentListParams as AgentListParams
+from .agent_move_params import AgentMoveParams as AgentMoveParams
+from .agent_list_response import AgentListResponse as AgentListResponse
+from .agent_move_response import AgentMoveResponse as AgentMoveResponse
diff --git a/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_params.py
new file mode 100644
index 00000000..b56d0395
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AgentListParams"]
+
+
+class AgentListParams(TypedDict, total=False):
+ only_deployed: bool
+ """Only list agents that are deployed."""
+
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_response.py
new file mode 100644
index 00000000..6f9ea948
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_response.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+
+__all__ = ["AgentListResponse"]
+
+
+class AgentListResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+
+from ....api_agent import APIAgent
diff --git a/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py
new file mode 100644
index 00000000..7b451084
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ....._types import SequenceNotStr
+from ....._utils import PropertyInfo
+
+__all__ = ["AgentMoveParams"]
+
+
+class AgentMoveParams(TypedDict, total=False):
+ agent_uuids: SequenceNotStr[str]
+ """Agent uuids"""
+
+ body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")]
+ """Workspace uuid to move agents to"""
diff --git a/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_response.py
new file mode 100644
index 00000000..d2d084d5
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ....._models import BaseModel
+
+__all__ = ["AgentMoveResponse"]
+
+
+class AgentMoveResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ....api_workspace import APIWorkspace
diff --git a/src/gradient/types/agents/evaluation_run_create_params.py b/src/gradient/types/agents/evaluation_run_create_params.py
new file mode 100644
index 00000000..b5e60803
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_run_create_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["EvaluationRunCreateParams"]
+
+
+class EvaluationRunCreateParams(TypedDict, total=False):
+ agent_deployment_names: SequenceNotStr[str]
+ """Agent deployment names to run the test case against (ADK agent workspaces)."""
+
+ agent_uuids: SequenceNotStr[str]
+ """Agent UUIDs to run the test case against (legacy agents)."""
+
+ run_name: str
+ """The name of the run."""
+
+ test_case_uuid: str
+ """Test-case UUID to run"""
diff --git a/src/gradient/types/agents/evaluation_run_create_response.py b/src/gradient/types/agents/evaluation_run_create_response.py
new file mode 100644
index 00000000..90da2e61
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_run_create_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["EvaluationRunCreateResponse"]
+
+
+class EvaluationRunCreateResponse(BaseModel):
+ evaluation_run_uuids: Optional[List[str]] = None
diff --git a/src/gradient/types/agents/evaluation_run_list_results_params.py b/src/gradient/types/agents/evaluation_run_list_results_params.py
new file mode 100644
index 00000000..bcf96c14
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_run_list_results_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["EvaluationRunListResultsParams"]
+
+
+class EvaluationRunListResultsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/evaluation_run_list_results_response.py b/src/gradient/types/agents/evaluation_run_list_results_response.py
new file mode 100644
index 00000000..e06bac94
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_run_list_results_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
+from .api_evaluation_run import APIEvaluationRun
+from .api_evaluation_prompt import APIEvaluationPrompt
+
+__all__ = ["EvaluationRunListResultsResponse"]
+
+
+class EvaluationRunListResultsResponse(BaseModel):
+ """Gets the full results of an evaluation run with all prompts."""
+
+ evaluation_run: Optional[APIEvaluationRun] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+ prompts: Optional[List[APIEvaluationPrompt]] = None
+ """The prompt level results."""
diff --git a/src/gradient/types/agents/evaluation_run_retrieve_response.py b/src/gradient/types/agents/evaluation_run_retrieve_response.py
new file mode 100644
index 00000000..cedba220
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_run_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_evaluation_run import APIEvaluationRun
+
+__all__ = ["EvaluationRunRetrieveResponse"]
+
+
+class EvaluationRunRetrieveResponse(BaseModel):
+ evaluation_run: Optional[APIEvaluationRun] = None
diff --git a/src/gradient/types/agents/evaluation_run_retrieve_results_response.py b/src/gradient/types/agents/evaluation_run_retrieve_results_response.py
new file mode 100644
index 00000000..4bb70732
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_run_retrieve_results_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_evaluation_prompt import APIEvaluationPrompt
+
+__all__ = ["EvaluationRunRetrieveResultsResponse"]
+
+
+class EvaluationRunRetrieveResultsResponse(BaseModel):
+ prompt: Optional[APIEvaluationPrompt] = None
diff --git a/src/gradient/types/agents/evaluation_test_case_create_params.py b/src/gradient/types/agents/evaluation_test_case_create_params.py
new file mode 100644
index 00000000..ff0666b9
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_create_params.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ..._types import SequenceNotStr
+from .api_star_metric_param import APIStarMetricParam
+
+__all__ = ["EvaluationTestCaseCreateParams"]
+
+
+class EvaluationTestCaseCreateParams(TypedDict, total=False):
+ agent_workspace_name: str
+
+ dataset_uuid: str
+ """Dataset against which the test-case is executed."""
+
+ description: str
+ """Description of the test case."""
+
+ metrics: SequenceNotStr[str]
+ """Full metric list to use for evaluation test case."""
+
+ name: str
+ """Name of the test case."""
+
+ star_metric: APIStarMetricParam
+
+ workspace_uuid: str
+ """The workspace uuid."""
diff --git a/src/gradient/types/agents/evaluation_test_case_create_response.py b/src/gradient/types/agents/evaluation_test_case_create_response.py
new file mode 100644
index 00000000..9f8e37f4
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["EvaluationTestCaseCreateResponse"]
+
+
+class EvaluationTestCaseCreateResponse(BaseModel):
+ test_case_uuid: Optional[str] = None
+ """Test-case UUID."""
diff --git a/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_params.py b/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_params.py
new file mode 100644
index 00000000..7f30ee28
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["EvaluationTestCaseListEvaluationRunsParams"]
+
+
+class EvaluationTestCaseListEvaluationRunsParams(TypedDict, total=False):
+ evaluation_test_case_version: int
+ """Version of the test case."""
diff --git a/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_response.py
new file mode 100644
index 00000000..d9565e97
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .api_evaluation_run import APIEvaluationRun
+
+__all__ = ["EvaluationTestCaseListEvaluationRunsResponse"]
+
+
+class EvaluationTestCaseListEvaluationRunsResponse(BaseModel):
+ evaluation_runs: Optional[List[APIEvaluationRun]] = None
+ """List of evaluation runs."""
diff --git a/src/gradient/types/agents/evaluation_test_case_list_response.py b/src/gradient/types/agents/evaluation_test_case_list_response.py
new file mode 100644
index 00000000..62b97961
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_list_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .api_evaluation_test_case import APIEvaluationTestCase
+
+__all__ = ["EvaluationTestCaseListResponse"]
+
+
+class EvaluationTestCaseListResponse(BaseModel):
+ evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None
+ """
+ Alternative way of authentication for internal usage only - should not be
+ exposed to public api
+ """
diff --git a/src/gradient/types/agents/evaluation_test_case_retrieve_params.py b/src/gradient/types/agents/evaluation_test_case_retrieve_params.py
new file mode 100644
index 00000000..f84fe876
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_retrieve_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["EvaluationTestCaseRetrieveParams"]
+
+
+class EvaluationTestCaseRetrieveParams(TypedDict, total=False):
+ evaluation_test_case_version: int
+ """Version of the test case."""
diff --git a/src/gradient/types/agents/evaluation_test_case_retrieve_response.py b/src/gradient/types/agents/evaluation_test_case_retrieve_response.py
new file mode 100644
index 00000000..1511ba74
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_evaluation_test_case import APIEvaluationTestCase
+
+__all__ = ["EvaluationTestCaseRetrieveResponse"]
+
+
+class EvaluationTestCaseRetrieveResponse(BaseModel):
+ evaluation_test_case: Optional[APIEvaluationTestCase] = None
diff --git a/src/gradient/types/agents/evaluation_test_case_update_params.py b/src/gradient/types/agents/evaluation_test_case_update_params.py
new file mode 100644
index 00000000..d707d909
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_update_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ..._types import SequenceNotStr
+from ..._utils import PropertyInfo
+from .api_star_metric_param import APIStarMetricParam
+
+__all__ = ["EvaluationTestCaseUpdateParams", "Metrics"]
+
+
+class EvaluationTestCaseUpdateParams(TypedDict, total=False):
+ dataset_uuid: str
+ """Dataset against which the test-case is executed."""
+
+ description: str
+ """Description of the test case."""
+
+ metrics: Metrics
+
+ name: str
+ """Name of the test case."""
+
+ star_metric: APIStarMetricParam
+
+ body_test_case_uuid: Annotated[str, PropertyInfo(alias="test_case_uuid")]
+ """Test-case UUID to update"""
+
+
+class Metrics(TypedDict, total=False):
+ metric_uuids: SequenceNotStr[str]
diff --git a/src/gradient/types/agents/evaluation_test_case_update_response.py b/src/gradient/types/agents/evaluation_test_case_update_response.py
new file mode 100644
index 00000000..6f8e3b04
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_test_case_update_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["EvaluationTestCaseUpdateResponse"]
+
+
+class EvaluationTestCaseUpdateResponse(BaseModel):
+ test_case_uuid: Optional[str] = None
+
+ version: Optional[int] = None
+ """The new version of the test case."""
diff --git a/src/gradient/types/agents/function_create_params.py b/src/gradient/types/agents/function_create_params.py
new file mode 100644
index 00000000..000de32b
--- /dev/null
+++ b/src/gradient/types/agents/function_create_params.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["FunctionCreateParams"]
+
+
+class FunctionCreateParams(TypedDict, total=False):
+ body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")]
+ """Agent id"""
+
+ description: str
+ """Function description"""
+
+ faas_name: str
+ """The name of the function in the DigitalOcean functions platform"""
+
+ faas_namespace: str
+ """The namespace of the function in the DigitalOcean functions platform"""
+
+ function_name: str
+ """Function name"""
+
+ input_schema: object
+ """Describe the input schema for the function so the agent may call it"""
+
+ output_schema: object
+ """Describe the output schema for the function so the agent can handle its response"""
diff --git a/src/gradient/types/agents/function_create_response.py b/src/gradient/types/agents/function_create_response.py
new file mode 100644
index 00000000..335ebac0
--- /dev/null
+++ b/src/gradient/types/agents/function_create_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FunctionCreateResponse"]
+
+
+class FunctionCreateResponse(BaseModel):
+ """Information about a newly function linked agent"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradient/types/agents/function_delete_response.py b/src/gradient/types/agents/function_delete_response.py
new file mode 100644
index 00000000..7490d34d
--- /dev/null
+++ b/src/gradient/types/agents/function_delete_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FunctionDeleteResponse"]
+
+
+class FunctionDeleteResponse(BaseModel):
+ """Information about a newly unlinked agent"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradient/types/agents/function_update_params.py b/src/gradient/types/agents/function_update_params.py
new file mode 100644
index 00000000..67c6ea9b
--- /dev/null
+++ b/src/gradient/types/agents/function_update_params.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["FunctionUpdateParams"]
+
+
+class FunctionUpdateParams(TypedDict, total=False):
+ path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]]
+
+ body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")]
+ """Agent id"""
+
+ description: str
+ """Function description"""
+
+ faas_name: str
+ """The name of the function in the DigitalOcean functions platform"""
+
+ faas_namespace: str
+ """The namespace of the function in the DigitalOcean functions platform"""
+
+ function_name: str
+ """Function name"""
+
+ body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")]
+ """Function id"""
+
+ input_schema: object
+ """Describe the input schema for the function so the agent may call it"""
+
+ output_schema: object
+ """Describe the output schema for the function so the agent can handle its response"""
diff --git a/src/gradient/types/agents/function_update_response.py b/src/gradient/types/agents/function_update_response.py
new file mode 100644
index 00000000..72399e92
--- /dev/null
+++ b/src/gradient/types/agents/function_update_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FunctionUpdateResponse"]
+
+
+class FunctionUpdateResponse(BaseModel):
+ """The updated agent"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradient/types/agents/knowledge_base_detach_response.py b/src/gradient/types/agents/knowledge_base_detach_response.py
new file mode 100644
index 00000000..c94b99a1
--- /dev/null
+++ b/src/gradient/types/agents/knowledge_base_detach_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["KnowledgeBaseDetachResponse"]
+
+
+class KnowledgeBaseDetachResponse(BaseModel):
+ """Information about an unlinked knowledge base"""
+
+ agent: Optional["APIAgent"] = None
+ """An Agent"""
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradient/types/agents/route_add_params.py b/src/gradient/types/agents/route_add_params.py
new file mode 100644
index 00000000..d8dbeff8
--- /dev/null
+++ b/src/gradient/types/agents/route_add_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["RouteAddParams"]
+
+
+class RouteAddParams(TypedDict, total=False):
+ path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]]
+
+ body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")]
+ """Routed agent id"""
+
+ if_case: str
+
+ body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")]
+ """A unique identifier for the parent agent."""
+
+ route_name: str
+ """Name of route"""
diff --git a/src/gradient/types/agents/route_add_response.py b/src/gradient/types/agents/route_add_response.py
new file mode 100644
index 00000000..b1755d54
--- /dev/null
+++ b/src/gradient/types/agents/route_add_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["RouteAddResponse"]
+
+
+class RouteAddResponse(BaseModel):
+ """Information about a newly linked agent"""
+
+ child_agent_uuid: Optional[str] = None
+ """Routed agent id"""
+
+ parent_agent_uuid: Optional[str] = None
+ """A unique identifier for the parent agent."""
diff --git a/src/gradient/types/agents/route_delete_response.py b/src/gradient/types/agents/route_delete_response.py
new file mode 100644
index 00000000..6dcc03b6
--- /dev/null
+++ b/src/gradient/types/agents/route_delete_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["RouteDeleteResponse"]
+
+
+class RouteDeleteResponse(BaseModel):
+ """Information about a removed linkage"""
+
+ child_agent_uuid: Optional[str] = None
+ """Routed agent id"""
+
+ parent_agent_uuid: Optional[str] = None
+ """Parent agent id"""
diff --git a/src/gradient/types/agents/route_update_params.py b/src/gradient/types/agents/route_update_params.py
new file mode 100644
index 00000000..453a3b93
--- /dev/null
+++ b/src/gradient/types/agents/route_update_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["RouteUpdateParams"]
+
+
+class RouteUpdateParams(TypedDict, total=False):
+ path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]]
+
+ body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")]
+ """Routed agent id"""
+
+ if_case: str
+ """Describes the case in which the child agent should be used"""
+
+ body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")]
+ """A unique identifier for the parent agent."""
+
+ route_name: str
+ """Route name"""
+
+ uuid: str
+ """Unique id of linkage"""
diff --git a/src/gradient/types/agents/route_update_response.py b/src/gradient/types/agents/route_update_response.py
new file mode 100644
index 00000000..dfcec469
--- /dev/null
+++ b/src/gradient/types/agents/route_update_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["RouteUpdateResponse"]
+
+
+class RouteUpdateResponse(BaseModel):
+ """Information about an updated linkage"""
+
+ child_agent_uuid: Optional[str] = None
+ """Routed agent id"""
+
+ parent_agent_uuid: Optional[str] = None
+ """A unique identifier for the parent agent."""
+
+ rollback: Optional[bool] = None
+
+ uuid: Optional[str] = None
+ """Unique id of linkage"""
diff --git a/src/gradient/types/agents/route_view_response.py b/src/gradient/types/agents/route_view_response.py
new file mode 100644
index 00000000..ddbf6f33
--- /dev/null
+++ b/src/gradient/types/agents/route_view_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["RouteViewResponse"]
+
+
+class RouteViewResponse(BaseModel):
+ """Child list for an agent"""
+
+ children: Optional[List["APIAgent"]] = None
+ """Child agents"""
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradient/types/agents/version_list_params.py b/src/gradient/types/agents/version_list_params.py
new file mode 100644
index 00000000..e8fa2f6d
--- /dev/null
+++ b/src/gradient/types/agents/version_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["VersionListParams"]
+
+
+class VersionListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/agents/version_list_response.py b/src/gradient/types/agents/version_list_response.py
new file mode 100644
index 00000000..75c45a95
--- /dev/null
+++ b/src/gradient/types/agents/version_list_response.py
@@ -0,0 +1,175 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
+from ..api_retrieval_method import APIRetrievalMethod
+
+__all__ = [
+ "VersionListResponse",
+ "AgentVersion",
+ "AgentVersionAttachedChildAgent",
+ "AgentVersionAttachedFunction",
+ "AgentVersionAttachedGuardrail",
+ "AgentVersionAttachedKnowledgebase",
+]
+
+
+class AgentVersionAttachedChildAgent(BaseModel):
+ agent_name: Optional[str] = None
+ """Name of the child agent"""
+
+ child_agent_uuid: Optional[str] = None
+ """Child agent unique identifier"""
+
+ if_case: Optional[str] = None
+ """If case"""
+
+ is_deleted: Optional[bool] = None
+ """Child agent is deleted"""
+
+ route_name: Optional[str] = None
+ """Route name"""
+
+
+class AgentVersionAttachedFunction(BaseModel):
+ """Function represents a function configuration for an agent"""
+
+ description: Optional[str] = None
+ """Description of the function"""
+
+ faas_name: Optional[str] = None
+ """FaaS name of the function"""
+
+ faas_namespace: Optional[str] = None
+ """FaaS namespace of the function"""
+
+ is_deleted: Optional[bool] = None
+ """Whether the function is deleted"""
+
+ name: Optional[str] = None
+ """Name of the function"""
+
+
+class AgentVersionAttachedGuardrail(BaseModel):
+ """Agent Guardrail version"""
+
+ is_deleted: Optional[bool] = None
+ """Whether the guardrail is deleted"""
+
+ name: Optional[str] = None
+ """Guardrail Name"""
+
+ priority: Optional[int] = None
+ """Guardrail Priority"""
+
+ uuid: Optional[str] = None
+ """Guardrail UUID"""
+
+
+class AgentVersionAttachedKnowledgebase(BaseModel):
+ is_deleted: Optional[bool] = None
+ """Deleted at date / time"""
+
+ name: Optional[str] = None
+ """Name of the knowledge base"""
+
+ uuid: Optional[str] = None
+ """Unique id of the knowledge base"""
+
+
+class AgentVersion(BaseModel):
+ """Represents an AgentVersion entity"""
+
+ id: Optional[str] = None
+ """Unique identifier"""
+
+ agent_uuid: Optional[str] = None
+ """Uuid of the agent this version belongs to"""
+
+ attached_child_agents: Optional[List[AgentVersionAttachedChildAgent]] = None
+ """List of child agent relationships"""
+
+ attached_functions: Optional[List[AgentVersionAttachedFunction]] = None
+ """List of function versions"""
+
+ attached_guardrails: Optional[List[AgentVersionAttachedGuardrail]] = None
+ """List of guardrail version"""
+
+ attached_knowledgebases: Optional[List[AgentVersionAttachedKnowledgebase]] = None
+ """List of knowledge base agent versions"""
+
+ can_rollback: Optional[bool] = None
+ """Whether the version is able to be rolled back to"""
+
+ created_at: Optional[datetime] = None
+ """Creation date"""
+
+ created_by_email: Optional[str] = None
+ """User who created this version"""
+
+ currently_applied: Optional[bool] = None
+ """Whether this is the currently applied configuration"""
+
+ description: Optional[str] = None
+ """Description of the agent"""
+
+ instruction: Optional[str] = None
+ """Instruction for the agent"""
+
+ k: Optional[int] = None
+ """K value for the agent's configuration"""
+
+ max_tokens: Optional[int] = None
+ """Max tokens setting for the agent"""
+
+ model: Optional[str] = FieldInfo(alias="model_name", default=None)
+ """Name of model associated to the agent version"""
+
+ name: Optional[str] = None
+ """Name of the agent"""
+
+ provide_citations: Optional[bool] = None
+ """Whether the agent should provide in-response citations"""
+
+ retrieval_method: Optional[APIRetrievalMethod] = None
+ """
+ - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown
+ - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite
+ - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back
+ - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries
+ - RETRIEVAL_METHOD_NONE: The retrieval method is none
+ """
+
+ tags: Optional[List[str]] = None
+ """Tags associated with the agent"""
+
+ temperature: Optional[float] = None
+ """Temperature setting for the agent"""
+
+ top_p: Optional[float] = None
+ """Top_p setting for the agent"""
+
+ trigger_action: Optional[str] = None
+ """Action triggering the configuration update"""
+
+ version_hash: Optional[str] = None
+ """Version hash"""
+
+
+class VersionListResponse(BaseModel):
+ """List of agent versions"""
+
+ agent_versions: Optional[List[AgentVersion]] = None
+ """Agents"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/agents/version_update_params.py b/src/gradient/types/agents/version_update_params.py
new file mode 100644
index 00000000..212eb05c
--- /dev/null
+++ b/src/gradient/types/agents/version_update_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["VersionUpdateParams"]
+
+
+class VersionUpdateParams(TypedDict, total=False):
+ body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
+ """Agent unique identifier"""
+
+ version_hash: str
+ """Unique identifier"""
diff --git a/src/gradient/types/agents/version_update_response.py b/src/gradient/types/agents/version_update_response.py
new file mode 100644
index 00000000..ee1188d1
--- /dev/null
+++ b/src/gradient/types/agents/version_update_response.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["VersionUpdateResponse", "AuditHeader"]
+
+
+class AuditHeader(BaseModel):
+ """An alternative way to provide auth information. for internal use only."""
+
+ actor_id: Optional[str] = None
+
+ actor_ip: Optional[str] = None
+
+ actor_uuid: Optional[str] = None
+
+ context_urn: Optional[str] = None
+
+ origin_application: Optional[str] = None
+
+ user_id: Optional[str] = None
+
+ user_uuid: Optional[str] = None
+
+
+class VersionUpdateResponse(BaseModel):
+ audit_header: Optional[AuditHeader] = None
+ """An alternative way to provide auth information. for internal use only."""
+
+ version_hash: Optional[str] = None
+ """Unique identifier"""
diff --git a/src/gradient/types/api_agent.py b/src/gradient/types/api_agent.py
new file mode 100644
index 00000000..e3fb21f2
--- /dev/null
+++ b/src/gradient/types/api_agent.py
@@ -0,0 +1,434 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+from .api_agent_model import APIAgentModel
+from .api_knowledge_base import APIKnowledgeBase
+from .api_retrieval_method import APIRetrievalMethod
+from .api_agent_api_key_info import APIAgentAPIKeyInfo
+from .api_openai_api_key_info import APIOpenAIAPIKeyInfo
+from .api_deployment_visibility import APIDeploymentVisibility
+from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = [
+ "APIAgent",
+ "APIKey",
+ "Chatbot",
+ "ChatbotIdentifier",
+ "Deployment",
+ "Function",
+ "Guardrail",
+ "LoggingConfig",
+ "ModelProviderKey",
+ "Template",
+ "TemplateGuardrail",
+]
+
+
+class APIKey(BaseModel):
+ """Agent API Key"""
+
+ api_key: Optional[str] = None
+ """Api key"""
+
+
+class Chatbot(BaseModel):
+ """A Chatbot"""
+
+ allowed_domains: Optional[List[str]] = None
+
+ button_background_color: Optional[str] = None
+
+ logo: Optional[str] = None
+
+ name: Optional[str] = None
+ """Name of chatbot"""
+
+ primary_color: Optional[str] = None
+
+ secondary_color: Optional[str] = None
+
+ starting_message: Optional[str] = None
+
+
+class ChatbotIdentifier(BaseModel):
+ """Agent Chatbot Identifier"""
+
+ agent_chatbot_identifier: Optional[str] = None
+ """Agent chatbot identifier"""
+
+
+class Deployment(BaseModel):
+ """Description of deployment"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ name: Optional[str] = None
+ """Name"""
+
+ status: Optional[
+ Literal[
+ "STATUS_UNKNOWN",
+ "STATUS_WAITING_FOR_DEPLOYMENT",
+ "STATUS_DEPLOYING",
+ "STATUS_RUNNING",
+ "STATUS_FAILED",
+ "STATUS_WAITING_FOR_UNDEPLOYMENT",
+ "STATUS_UNDEPLOYING",
+ "STATUS_UNDEPLOYMENT_FAILED",
+ "STATUS_DELETED",
+ "STATUS_BUILDING",
+ ]
+ ] = None
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ url: Optional[str] = None
+ """Access your deployed agent here"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+ visibility: Optional[APIDeploymentVisibility] = None
+ """
+ - VISIBILITY_UNKNOWN: The status of the deployment is unknown
+ - VISIBILITY_DISABLED: The deployment is disabled and will no longer service
+ requests
+ - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state
+ - VISIBILITY_PUBLIC: The deployment is public and will service requests from the
+ public internet
+ - VISIBILITY_PRIVATE: The deployment is private and will only service requests
+ from other agents, or through API keys
+ """
+
+
+class Function(BaseModel):
+ """Description missing"""
+
+ api_key: Optional[str] = None
+ """Api key"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ created_by: Optional[str] = None
+ """Created by user id from DO"""
+
+ description: Optional[str] = None
+ """Agent description"""
+
+ faas_name: Optional[str] = None
+
+ faas_namespace: Optional[str] = None
+
+ input_schema: Optional[object] = None
+
+ name: Optional[str] = None
+ """Name"""
+
+ output_schema: Optional[object] = None
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ url: Optional[str] = None
+ """Download your agent here"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+
+class Guardrail(BaseModel):
+ """A Agent Guardrail"""
+
+ agent_uuid: Optional[str] = None
+
+ created_at: Optional[datetime] = None
+
+ default_response: Optional[str] = None
+
+ description: Optional[str] = None
+
+ guardrail_uuid: Optional[str] = None
+
+ is_attached: Optional[bool] = None
+
+ is_default: Optional[bool] = None
+
+ metadata: Optional[object] = None
+
+ name: Optional[str] = None
+
+ priority: Optional[int] = None
+
+ type: Optional[
+ Literal[
+ "GUARDRAIL_TYPE_UNKNOWN",
+ "GUARDRAIL_TYPE_JAILBREAK",
+ "GUARDRAIL_TYPE_SENSITIVE_DATA",
+ "GUARDRAIL_TYPE_CONTENT_MODERATION",
+ ]
+ ] = None
+
+ updated_at: Optional[datetime] = None
+
+ uuid: Optional[str] = None
+
+
+class LoggingConfig(BaseModel):
+ galileo_project_id: Optional[str] = None
+ """Galileo project identifier"""
+
+ galileo_project_name: Optional[str] = None
+ """Name of the Galileo project"""
+
+ insights_enabled: Optional[bool] = None
+ """Whether insights are enabled"""
+
+ insights_enabled_at: Optional[datetime] = None
+ """Timestamp when insights were enabled"""
+
+ log_stream_id: Optional[str] = None
+ """Identifier for the log stream"""
+
+ log_stream_name: Optional[str] = None
+ """Name of the log stream"""
+
+
+class ModelProviderKey(BaseModel):
+ api_key_uuid: Optional[str] = None
+ """API key ID"""
+
+ created_at: Optional[datetime] = None
+ """Key creation date"""
+
+ created_by: Optional[str] = None
+ """Created by user id from DO"""
+
+ deleted_at: Optional[datetime] = None
+ """Key deleted date"""
+
+ models: Optional[List[APIAgentModel]] = None
+ """Models supported by the openAI api key"""
+
+ name: Optional[str] = None
+ """Name of the key"""
+
+ provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+ None
+ )
+
+ updated_at: Optional[datetime] = None
+ """Key last updated date"""
+
+
+class TemplateGuardrail(BaseModel):
+ priority: Optional[int] = None
+ """Priority of the guardrail"""
+
+ uuid: Optional[str] = None
+ """Uuid of the guardrail"""
+
+
+class Template(BaseModel):
+ """Represents an AgentTemplate entity"""
+
+ created_at: Optional[datetime] = None
+ """The agent template's creation date"""
+
+ description: Optional[str] = None
+ """Deprecated - Use summary instead"""
+
+ guardrails: Optional[List[TemplateGuardrail]] = None
+ """List of guardrails associated with the agent template"""
+
+ instruction: Optional[str] = None
+ """Instructions for the agent template"""
+
+ k: Optional[int] = None
+ """The 'k' value for the agent template"""
+
+ knowledge_bases: Optional[List[APIKnowledgeBase]] = None
+ """List of knowledge bases associated with the agent template"""
+
+ long_description: Optional[str] = None
+ """The long description of the agent template"""
+
+ max_tokens: Optional[int] = None
+ """The max_tokens setting for the agent template"""
+
+ model: Optional[APIAgentModel] = None
+ """Description of a Model"""
+
+ name: Optional[str] = None
+ """Name of the agent template"""
+
+ short_description: Optional[str] = None
+ """The short description of the agent template"""
+
+ summary: Optional[str] = None
+ """The summary of the agent template"""
+
+ tags: Optional[List[str]] = None
+ """List of tags associated with the agent template"""
+
+ temperature: Optional[float] = None
+ """The temperature setting for the agent template"""
+
+ template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None
+ """
+ - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template
+ - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template
+ """
+
+ top_p: Optional[float] = None
+ """The top_p setting for the agent template"""
+
+ updated_at: Optional[datetime] = None
+ """The agent template's last updated date"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+
+class APIAgent(BaseModel):
+ """An Agent"""
+
+ anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
+
+ api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None
+ """Api key infos"""
+
+ api_keys: Optional[List[APIKey]] = None
+ """Api keys"""
+
+ chatbot: Optional[Chatbot] = None
+ """A Chatbot"""
+
+ chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None
+ """Chatbot identifiers"""
+
+ child_agents: Optional[List["APIAgent"]] = None
+ """Child agents"""
+
+ conversation_logs_enabled: Optional[bool] = None
+ """Whether conversation logs are enabled for the agent"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ deployment: Optional[Deployment] = None
+ """Description of deployment"""
+
+ description: Optional[str] = None
+ """Description of agent"""
+
+ functions: Optional[List[Function]] = None
+
+ guardrails: Optional[List[Guardrail]] = None
+ """The guardrails the agent is attached to"""
+
+ if_case: Optional[str] = None
+
+ instruction: Optional[str] = None
+ """Agent instruction.
+
+ Instructions help your agent to perform its job effectively. See
+ [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
+ for best practices.
+ """
+
+ k: Optional[int] = None
+
+ knowledge_bases: Optional[List[APIKnowledgeBase]] = None
+ """Knowledge bases"""
+
+ logging_config: Optional[LoggingConfig] = None
+
+ max_tokens: Optional[int] = None
+
+ model: Optional[APIAgentModel] = None
+ """Description of a Model"""
+
+ api_model_provider_key: Optional[ModelProviderKey] = FieldInfo(alias="model_provider_key", default=None)
+
+ name: Optional[str] = None
+ """Agent name"""
+
+ openai_api_key: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
+
+ parent_agents: Optional[List["APIAgent"]] = None
+ """Parent agents"""
+
+ project_id: Optional[str] = None
+
+ provide_citations: Optional[bool] = None
+ """Whether the agent should provide in-response citations"""
+
+ region: Optional[str] = None
+ """Region code"""
+
+ retrieval_method: Optional[APIRetrievalMethod] = None
+ """
+ - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown
+ - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite
+ - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back
+ - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries
+ - RETRIEVAL_METHOD_NONE: The retrieval method is none
+ """
+
+ route_created_at: Optional[datetime] = None
+ """Creation of route date / time"""
+
+ route_created_by: Optional[str] = None
+
+ route_name: Optional[str] = None
+ """Route name"""
+
+ route_uuid: Optional[str] = None
+
+ tags: Optional[List[str]] = None
+ """Agent tag to organize related resources"""
+
+ temperature: Optional[float] = None
+
+ template: Optional[Template] = None
+ """Represents an AgentTemplate entity"""
+
+ top_p: Optional[float] = None
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ url: Optional[str] = None
+ """Access your agent under this url"""
+
+ user_id: Optional[str] = None
+ """Id of user that created the agent"""
+
+ uuid: Optional[str] = None
+ """Unique agent id"""
+
+ version_hash: Optional[str] = None
+ """The latest version of the agent"""
+
+ vpc_egress_ips: Optional[List[str]] = None
+ """VPC Egress IPs"""
+
+ vpc_uuid: Optional[str] = None
+
+ workspace: Optional["APIWorkspace"] = None
+
+
+from .api_workspace import APIWorkspace
diff --git a/src/gradient/types/api_agent_api_key_info.py b/src/gradient/types/api_agent_api_key_info.py
new file mode 100644
index 00000000..06bd0fda
--- /dev/null
+++ b/src/gradient/types/api_agent_api_key_info.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from .._models import BaseModel
+
+__all__ = ["APIAgentAPIKeyInfo"]
+
+
+class APIAgentAPIKeyInfo(BaseModel):
+ """Agent API Key Info"""
+
+ created_at: Optional[datetime] = None
+ """Creation date"""
+
+ created_by: Optional[str] = None
+ """Created by"""
+
+ deleted_at: Optional[datetime] = None
+ """Deleted date"""
+
+ name: Optional[str] = None
+ """Name"""
+
+ secret_key: Optional[str] = None
+
+ uuid: Optional[str] = None
+ """Uuid"""
diff --git a/src/gradient/types/api_agent_model.py b/src/gradient/types/api_agent_model.py
new file mode 100644
index 00000000..a6a36c6f
--- /dev/null
+++ b/src/gradient/types/api_agent_model.py
@@ -0,0 +1,82 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .api_agreement import APIAgreement
+from .api_model_version import APIModelVersion
+
+__all__ = ["APIAgentModel"]
+
+
+class APIAgentModel(BaseModel):
+ """Description of a Model"""
+
+ agreement: Optional[APIAgreement] = None
+ """Agreement Description"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ inference_name: Optional[str] = None
+ """Internally used name"""
+
+ inference_version: Optional[str] = None
+ """Internally used version"""
+
+ is_foundational: Optional[bool] = None
+ """True if it is a foundational model provided by do"""
+
+ kb_default_chunk_size: Optional[int] = None
+ """Default chunking size limit to show in UI"""
+
+ kb_max_chunk_size: Optional[int] = None
+ """Maximum chunk size limit of model"""
+
+ kb_min_chunk_size: Optional[int] = None
+ """Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase"""
+
+ metadata: Optional[object] = None
+ """Additional meta data"""
+
+ name: Optional[str] = None
+ """Name of the model"""
+
+ parent_uuid: Optional[str] = None
+ """Unique id of the model, this model is based on"""
+
+ provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+ None
+ )
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ upload_complete: Optional[bool] = None
+ """Model has been fully uploaded"""
+
+ url: Optional[str] = None
+ """Download url"""
+
+ usecases: Optional[
+ List[
+ Literal[
+ "MODEL_USECASE_UNKNOWN",
+ "MODEL_USECASE_AGENT",
+ "MODEL_USECASE_FINETUNED",
+ "MODEL_USECASE_KNOWLEDGEBASE",
+ "MODEL_USECASE_GUARDRAIL",
+ "MODEL_USECASE_REASONING",
+ "MODEL_USECASE_SERVERLESS",
+ ]
+ ]
+ ] = None
+ """Usecases of the model"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+ version: Optional[APIModelVersion] = None
+ """Version Information about a Model"""
diff --git a/src/gradient/types/api_agreement.py b/src/gradient/types/api_agreement.py
new file mode 100644
index 00000000..8eca3c3c
--- /dev/null
+++ b/src/gradient/types/api_agreement.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["APIAgreement"]
+
+
+class APIAgreement(BaseModel):
+ """Agreement Description"""
+
+ description: Optional[str] = None
+
+ name: Optional[str] = None
+
+ url: Optional[str] = None
+
+ uuid: Optional[str] = None
diff --git a/src/gradient/types/api_anthropic_api_key_info.py b/src/gradient/types/api_anthropic_api_key_info.py
new file mode 100644
index 00000000..bf13fd60
--- /dev/null
+++ b/src/gradient/types/api_anthropic_api_key_info.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from .._models import BaseModel
+
+__all__ = ["APIAnthropicAPIKeyInfo"]
+
+
+class APIAnthropicAPIKeyInfo(BaseModel):
+ """Anthropic API Key Info"""
+
+ created_at: Optional[datetime] = None
+ """Key creation date"""
+
+ created_by: Optional[str] = None
+ """Created by user id from DO"""
+
+ deleted_at: Optional[datetime] = None
+ """Key deleted date"""
+
+ name: Optional[str] = None
+ """Name"""
+
+ updated_at: Optional[datetime] = None
+ """Key last updated date"""
+
+ uuid: Optional[str] = None
+ """Uuid"""
diff --git a/src/gradient/types/api_deployment_visibility.py b/src/gradient/types/api_deployment_visibility.py
new file mode 100644
index 00000000..a63e3acd
--- /dev/null
+++ b/src/gradient/types/api_deployment_visibility.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["APIDeploymentVisibility"]
+
+APIDeploymentVisibility: TypeAlias = Literal[
+ "VISIBILITY_UNKNOWN", "VISIBILITY_DISABLED", "VISIBILITY_PLAYGROUND", "VISIBILITY_PUBLIC", "VISIBILITY_PRIVATE"
+]
diff --git a/src/gradient/types/api_knowledge_base.py b/src/gradient/types/api_knowledge_base.py
new file mode 100644
index 00000000..e64f9336
--- /dev/null
+++ b/src/gradient/types/api_knowledge_base.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from .._models import BaseModel
+from .knowledge_bases.api_indexing_job import APIIndexingJob
+
+__all__ = ["APIKnowledgeBase"]
+
+
+class APIKnowledgeBase(BaseModel):
+ """Knowledgebase Description"""
+
+ added_to_agent_at: Optional[datetime] = None
+ """Time when the knowledge base was added to the agent"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ database_id: Optional[str] = None
+
+ embedding_model_uuid: Optional[str] = None
+
+ is_public: Optional[bool] = None
+ """Whether the knowledge base is public or not"""
+
+ last_indexing_job: Optional[APIIndexingJob] = None
+ """IndexingJob description"""
+
+ name: Optional[str] = None
+ """Name of knowledge base"""
+
+ project_id: Optional[str] = None
+
+ region: Optional[str] = None
+ """Region code"""
+
+ tags: Optional[List[str]] = None
+ """Tags to organize related resources"""
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ user_id: Optional[str] = None
+ """Id of user that created the knowledge base"""
+
+ uuid: Optional[str] = None
+ """Unique id for knowledge base"""
diff --git a/src/gradient/types/api_model.py b/src/gradient/types/api_model.py
new file mode 100644
index 00000000..1d9752e4
--- /dev/null
+++ b/src/gradient/types/api_model.py
@@ -0,0 +1,56 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from .._models import BaseModel
+from .api_agreement import APIAgreement
+from .api_model_version import APIModelVersion
+
+__all__ = ["APIModel"]
+
+
+class APIModel(BaseModel):
+ """A machine learning model stored on the GenAI platform"""
+
+ id: Optional[str] = None
+ """Human-readable model identifier"""
+
+ agreement: Optional[APIAgreement] = None
+ """Agreement Description"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ is_foundational: Optional[bool] = None
+ """True if it is a foundational model provided by do"""
+
+ kb_default_chunk_size: Optional[int] = None
+ """Default chunking size limit to show in UI"""
+
+ kb_max_chunk_size: Optional[int] = None
+ """Maximum chunk size limit of model"""
+
+ kb_min_chunk_size: Optional[int] = None
+ """Minimum chunking size token limits if model supports KNOWLEDGEBASE usecase"""
+
+ name: Optional[str] = None
+ """Display name of the model"""
+
+ parent_uuid: Optional[str] = None
+ """Unique id of the model, this model is based on"""
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ upload_complete: Optional[bool] = None
+ """Model has been fully uploaded"""
+
+ url: Optional[str] = None
+ """Download url"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+ version: Optional[APIModelVersion] = None
+ """Version Information about a Model"""
diff --git a/src/gradient/types/api_model_version.py b/src/gradient/types/api_model_version.py
new file mode 100644
index 00000000..3989e256
--- /dev/null
+++ b/src/gradient/types/api_model_version.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["APIModelVersion"]
+
+
+class APIModelVersion(BaseModel):
+ """Version Information about a Model"""
+
+ major: Optional[int] = None
+ """Major version number"""
+
+ minor: Optional[int] = None
+ """Minor version number"""
+
+ patch: Optional[int] = None
+ """Patch version number"""
diff --git a/src/gradient/types/api_openai_api_key_info.py b/src/gradient/types/api_openai_api_key_info.py
new file mode 100644
index 00000000..69e9b138
--- /dev/null
+++ b/src/gradient/types/api_openai_api_key_info.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from .._models import BaseModel
+from .api_agent_model import APIAgentModel
+
+__all__ = ["APIOpenAIAPIKeyInfo"]
+
+
+class APIOpenAIAPIKeyInfo(BaseModel):
+ """OpenAI API Key Info"""
+
+ created_at: Optional[datetime] = None
+ """Key creation date"""
+
+ created_by: Optional[str] = None
+ """Created by user id from DO"""
+
+ deleted_at: Optional[datetime] = None
+ """Key deleted date"""
+
+ models: Optional[List[APIAgentModel]] = None
+ """Models supported by the openAI api key"""
+
+ name: Optional[str] = None
+ """Name"""
+
+ updated_at: Optional[datetime] = None
+ """Key last updated date"""
+
+ uuid: Optional[str] = None
+ """Uuid"""
diff --git a/src/gradient/types/api_retrieval_method.py b/src/gradient/types/api_retrieval_method.py
new file mode 100644
index 00000000..9d92838e
--- /dev/null
+++ b/src/gradient/types/api_retrieval_method.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["APIRetrievalMethod"]
+
+APIRetrievalMethod: TypeAlias = Literal[
+ "RETRIEVAL_METHOD_UNKNOWN",
+ "RETRIEVAL_METHOD_REWRITE",
+ "RETRIEVAL_METHOD_STEP_BACK",
+ "RETRIEVAL_METHOD_SUB_QUERIES",
+ "RETRIEVAL_METHOD_NONE",
+]
diff --git a/src/gradient/types/api_workspace.py b/src/gradient/types/api_workspace.py
new file mode 100644
index 00000000..564fabb6
--- /dev/null
+++ b/src/gradient/types/api_workspace.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from datetime import datetime
+
+from .._models import BaseModel
+from .agents.api_evaluation_test_case import APIEvaluationTestCase
+
+__all__ = ["APIWorkspace"]
+
+
+class APIWorkspace(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+ """Agents"""
+
+ created_at: Optional[datetime] = None
+ """Creation date"""
+
+ created_by: Optional[str] = None
+ """The id of user who created this workspace"""
+
+ created_by_email: Optional[str] = None
+ """The email of the user who created this workspace"""
+
+ deleted_at: Optional[datetime] = None
+ """Deleted date"""
+
+ description: Optional[str] = None
+ """Description of the workspace"""
+
+ evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None
+ """Evaluations"""
+
+ name: Optional[str] = None
+ """Name of the workspace"""
+
+ updated_at: Optional[datetime] = None
+ """Update date"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+
+from .api_agent import APIAgent
diff --git a/src/gradient/types/apps/__init__.py b/src/gradient/types/apps/__init__.py
new file mode 100644
index 00000000..b4a2e426
--- /dev/null
+++ b/src/gradient/types/apps/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .job_invocation_cancel_params import JobInvocationCancelParams as JobInvocationCancelParams
+from .job_invocation_cancel_response import JobInvocationCancelResponse as JobInvocationCancelResponse
diff --git a/src/gradient/types/apps/job_invocation_cancel_params.py b/src/gradient/types/apps/job_invocation_cancel_params.py
new file mode 100644
index 00000000..b026fcb9
--- /dev/null
+++ b/src/gradient/types/apps/job_invocation_cancel_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["JobInvocationCancelParams"]
+
+
+class JobInvocationCancelParams(TypedDict, total=False):
+ app_id: Required[str]
+
+ job_name: str
+ """The job name to list job invocations for."""
diff --git a/src/gradient/types/apps/job_invocation_cancel_response.py b/src/gradient/types/apps/job_invocation_cancel_response.py
new file mode 100644
index 00000000..96b2c642
--- /dev/null
+++ b/src/gradient/types/apps/job_invocation_cancel_response.py
@@ -0,0 +1,77 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = [
+ "JobInvocationCancelResponse",
+ "Trigger",
+ "TriggerManual",
+ "TriggerManualUser",
+ "TriggerScheduled",
+ "TriggerScheduledSchedule",
+]
+
+
+class TriggerManualUser(BaseModel):
+ """The user who triggered the job"""
+
+ email: Optional[str] = None
+
+ full_name: Optional[str] = None
+
+ uuid: Optional[str] = None
+
+
+class TriggerManual(BaseModel):
+ """Details about the manual trigger, if applicable"""
+
+ user: Optional[TriggerManualUser] = None
+ """The user who triggered the job"""
+
+
+class TriggerScheduledSchedule(BaseModel):
+ cron: Optional[str] = None
+ """The cron expression defining the schedule"""
+
+ time_zone: Optional[str] = None
+ """The time zone for the schedule"""
+
+
+class TriggerScheduled(BaseModel):
+ """The schedule for the job"""
+
+ schedule: Optional[TriggerScheduledSchedule] = None
+
+
+class Trigger(BaseModel):
+ manual: Optional[TriggerManual] = None
+ """Details about the manual trigger, if applicable"""
+
+ scheduled: Optional[TriggerScheduled] = None
+ """The schedule for the job"""
+
+ type: Optional[Literal["MANUAL", "SCHEDULE", "UNKNOWN"]] = None
+ """The type of trigger that initiated the job invocation."""
+
+
+class JobInvocationCancelResponse(BaseModel):
+ id: Optional[str] = None
+
+ completed_at: Optional[datetime] = None
+
+ created_at: Optional[datetime] = None
+
+ deployment_id: Optional[str] = None
+
+ job_name: Optional[str] = None
+
+ phase: Optional[Literal["UNKNOWN", "PENDING", "RUNNING", "SUCCEEDED", "FAILED", "CANCELED", "SKIPPED"]] = None
+ """The phase of the job invocation"""
+
+ started_at: Optional[datetime] = None
+
+ trigger: Optional[Trigger] = None
diff --git a/src/gradient/types/billing_list_insights_params.py b/src/gradient/types/billing_list_insights_params.py
new file mode 100644
index 00000000..6e89dd1e
--- /dev/null
+++ b/src/gradient/types/billing_list_insights_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from datetime import date
+from typing_extensions import Required, Annotated, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["BillingListInsightsParams"]
+
+
+class BillingListInsightsParams(TypedDict, total=False):
+ account_urn: Required[str]
+
+ start_date: Required[Annotated[Union[str, date], PropertyInfo(format="iso8601")]]
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/billing_list_insights_response.py b/src/gradient/types/billing_list_insights_response.py
new file mode 100644
index 00000000..f7515a6b
--- /dev/null
+++ b/src/gradient/types/billing_list_insights_response.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import date
+
+from .._models import BaseModel
+
+__all__ = ["BillingListInsightsResponse", "DataPoint"]
+
+
+class DataPoint(BaseModel):
+ description: Optional[str] = None
+ """Description of the billed resource or service as shown on an invoice item"""
+
+ group_description: Optional[str] = None
+ """
+ Optional invoice item group name of the billed resource or service, blank when
+ not part an invoice item group
+ """
+
+ region: Optional[str] = None
+ """Region where the usage occurred"""
+
+ sku: Optional[str] = None
+ """Unique SKU identifier for the billed resource"""
+
+ start_date: Optional[date] = None
+ """Start date of the billing data point in YYYY-MM-DD format"""
+
+ total_amount: Optional[str] = None
+ """Total amount for this data point in USD"""
+
+ usage_team_urn: Optional[str] = None
+ """URN of the team that incurred the usage"""
+
+
+class BillingListInsightsResponse(BaseModel):
+ current_page: int
+ """Current page number"""
+
+ data_points: List[DataPoint]
+ """
+ Array of billing data points, which are day-over-day changes in billing resource
+ usage based on nightly invoice item estimates, for the requested period
+ """
+
+ total_items: int
+ """Total number of items available across all pages"""
+
+ total_pages: int
+ """Total number of pages available"""
diff --git a/src/gradient/types/chat/__init__.py b/src/gradient/types/chat/__init__.py
new file mode 100644
index 00000000..9384ac14
--- /dev/null
+++ b/src/gradient/types/chat/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
diff --git a/src/gradient/types/chat/completion_create_params.py b/src/gradient/types/chat/completion_create_params.py
new file mode 100644
index 00000000..925eea7e
--- /dev/null
+++ b/src/gradient/types/chat/completion_create_params.py
@@ -0,0 +1,800 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = [
+ "CompletionCreateParamsBase",
+ "Message",
+ "MessageChatCompletionRequestSystemMessage",
+ "MessageChatCompletionRequestSystemMessageContent",
+ "MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestDeveloperMessage",
+ "MessageChatCompletionRequestDeveloperMessageContent",
+ "MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestUserMessage",
+ "MessageChatCompletionRequestUserMessageContent",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURL",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURLImageURL",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURL",
+ "MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURLVideoURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURLImageURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURL",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURLVideoURL",
+ "MessageChatCompletionRequestAssistantMessage",
+ "MessageChatCompletionRequestAssistantMessageContent",
+ "MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestAssistantMessageToolCall",
+ "MessageChatCompletionRequestAssistantMessageToolCallFunction",
+ "MessageChatCompletionRequestToolMessage",
+ "MessageChatCompletionRequestToolMessageContent",
+ "MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartTextCacheControl",
+ "MessageChatCompletionRequestToolMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartText",
+ "MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl",
+ "StreamOptions",
+ "ToolChoice",
+ "ToolChoiceChatCompletionNamedToolChoice",
+ "ToolChoiceChatCompletionNamedToolChoiceFunction",
+ "Tool",
+ "ToolFunction",
+ "CompletionCreateParamsNonStreaming",
+ "CompletionCreateParamsStreaming",
+]
+
+
+class CompletionCreateParamsBase(TypedDict, total=False):
+ messages: Required[Iterable[Message]]
+ """A list of messages comprising the conversation so far."""
+
+ model: Required[str]
+ """Model ID used to generate the response."""
+
+ frequency_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on their existing frequency in the
+ text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ """
+
+ logit_bias: Optional[Dict[str, int]]
+ """Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in the
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+ bias is added to the logits generated by the model prior to sampling. The exact
+ effect will vary per model, but values between -1 and 1 should decrease or
+ increase likelihood of selection; values like -100 or 100 should result in a ban
+ or exclusive selection of the relevant token.
+ """
+
+ logprobs: Optional[bool]
+ """Whether to return log probabilities of the output tokens or not.
+
+ If true, returns the log probabilities of each output token returned in the
+ `content` of `message`.
+ """
+
+ max_completion_tokens: Optional[int]
+ """
+ The maximum number of completion tokens that may be used over the course of the
+ run. The run will make a best effort to use only the number of completion tokens
+ specified, across multiple turns of the run.
+ """
+
+ max_tokens: Optional[int]
+ """The maximum number of tokens that can be generated in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+ """
+
+ metadata: Optional[Dict[str, str]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ n: Optional[int]
+ """How many chat completion choices to generate for each input message.
+
+ Note that you will be charged based on the number of generated tokens across all
+ of the choices. Keep `n` as `1` to minimize costs.
+ """
+
+ presence_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on whether they appear in the text so
+ far, increasing the model's likelihood to talk about new topics.
+ """
+
+ reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]]
+ """Constrains effort on reasoning for reasoning models.
+
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response.
+ """
+
+ stop: Union[Optional[str], SequenceNotStr[str], None]
+ """Up to 4 sequences where the API will stop generating further tokens.
+
+ The returned text will not contain the stop sequence.
+ """
+
+ stream_options: Optional[StreamOptions]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ tool_choice: ToolChoice
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+ """
+
+ tools: Iterable[Tool]
+ """A list of tools the model may call.
+
+ Currently, only functions are supported as a tool.
+ """
+
+ top_logprobs: Optional[int]
+ """
+ An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+ """
+
+
+class MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestSystemMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestSystemMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
+ """
+ System-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
+ content: Required[MessageChatCompletionRequestSystemMessageContent]
+ """The contents of the system message."""
+
+ role: Required[Literal["system"]]
+ """The role of the messages author, in this case `system`."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestDeveloperMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestDeveloperMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
+ """
+ Developer-provided instructions that the model should follow, regardless of
+ messages sent by the user.
+ """
+
+ content: Required[MessageChatCompletionRequestDeveloperMessageContent]
+ """The contents of the developer message."""
+
+ role: Required[Literal["developer"]]
+ """The role of the messages author, in this case `developer`."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURLImageURL(
+ TypedDict, total=False
+):
+ """Image URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing image content."""
+
+ detail: Literal["auto", "low", "high"]
+ """Optional detail level for image understanding."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURL(TypedDict, total=False):
+ """Content part with type and image URL."""
+
+ image_url: Required[MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURLImageURL]
+ """Image URL settings."""
+
+ type: Required[Literal["image_url"]]
+ """The type of content part"""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURLVideoURL(
+ TypedDict, total=False
+):
+ """Video URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing video content."""
+
+
+class MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURL(TypedDict, total=False):
+ """Content part with type and video URL."""
+
+ type: Required[Literal["video_url"]]
+ """The type of content part"""
+
+ video_url: Required[MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURLVideoURL]
+ """Video URL settings."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: (
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ )
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURLImageURL(
+ TypedDict, total=False
+):
+ """Image URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing image content."""
+
+ detail: Literal["auto", "low", "high"]
+ """Optional detail level for image understanding."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURL(
+ TypedDict, total=False
+):
+ """Content part with type and image URL."""
+
+ image_url: Required[
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURLImageURL
+ ]
+ """Image URL settings."""
+
+ type: Required[Literal["image_url"]]
+ """The type of content part"""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURLVideoURL(
+ TypedDict, total=False
+):
+ """Video URL settings."""
+
+ url: Required[str]
+ """A URL or data URL containing video content."""
+
+
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURL(
+ TypedDict, total=False
+):
+ """Content part with type and video URL."""
+
+ type: Required[Literal["video_url"]]
+ """The type of content part"""
+
+ video_url: Required[
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURLVideoURL
+ ]
+ """Video URL settings."""
+
+
+MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartText,
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartImageURL,
+ MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestContentPartVideoURL,
+]
+
+MessageChatCompletionRequestUserMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartText,
+ MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartImageURL,
+ MessageChatCompletionRequestUserMessageContentChatCompletionRequestContentPartVideoURL,
+ SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
+ """
+ Messages sent by an end user, containing prompts or additional context
+ information.
+ """
+
+ content: Required[MessageChatCompletionRequestUserMessageContent]
+ """The contents of the user message."""
+
+ role: Required[Literal["user"]]
+ """The role of the messages author, in this case `user`."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestAssistantMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestAssistantMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False):
+ """The function that the model called."""
+
+ arguments: Required[str]
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the tool call."""
+
+ function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction]
+ """The function that the model called."""
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
+ """Messages sent by the model in response to user messages."""
+
+ role: Required[Literal["assistant"]]
+ """The role of the messages author, in this case `assistant`."""
+
+ content: Optional[MessageChatCompletionRequestAssistantMessageContent]
+ """The contents of the assistant message."""
+
+ tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall]
+ """The tool calls generated by the model, such as function calls."""
+
+
+class MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartText(TypedDict, total=False):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartTextCacheControl
+ """Cache control settings for the content part."""
+
+
+class MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl(
+ TypedDict, total=False
+):
+ """Cache control settings for the content part."""
+
+ type: Required[Literal["ephemeral"]]
+ """The cache control type."""
+
+ ttl: Literal["5m", "1h"]
+ """The cache TTL."""
+
+
+class MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartText(
+ TypedDict, total=False
+):
+ """Content part with type and text"""
+
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+ cache_control: (
+ MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartTextCacheControl
+ )
+ """Cache control settings for the content part."""
+
+
+MessageChatCompletionRequestToolMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestToolMessageContentArrayOfContentPartChatCompletionRequestContentPartText
+]
+
+MessageChatCompletionRequestToolMessageContent: TypeAlias = Union[
+ str,
+ MessageChatCompletionRequestToolMessageContentChatCompletionRequestContentPartText,
+ SequenceNotStr[MessageChatCompletionRequestToolMessageContentArrayOfContentPart],
+]
+
+
+class MessageChatCompletionRequestToolMessage(TypedDict, total=False):
+ content: Required[MessageChatCompletionRequestToolMessageContent]
+ """The contents of the tool message."""
+
+ role: Required[Literal["tool"]]
+ """The role of the messages author, in this case `tool`."""
+
+ tool_call_id: Required[str]
+ """Tool call that this message is responding to."""
+
+
+Message: TypeAlias = Union[
+ MessageChatCompletionRequestSystemMessage,
+ MessageChatCompletionRequestDeveloperMessage,
+ MessageChatCompletionRequestUserMessage,
+ MessageChatCompletionRequestAssistantMessage,
+ MessageChatCompletionRequestToolMessage,
+]
+
+
+class StreamOptions(TypedDict, total=False):
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ include_usage: bool
+ """If set, an additional chunk will be streamed before the `data: [DONE]` message.
+
+ The `usage` field on this chunk shows the token usage statistics for the entire
+ request, and the `choices` field will always be an empty array.
+
+ All other chunks will also include a `usage` field, but with a null value.
+ **NOTE:** If the stream is interrupted, you may not receive the final usage
+ chunk which contains the total token usage for the request.
+ """
+
+
+class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False):
+ """Specifies a tool the model should use.
+
+ Use to force the model to call a specific function.
+ """
+
+ function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction]
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice]
+
+
+class ToolFunction(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: str
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ parameters: Dict[str, object]
+ """The parameters the functions accepts, described as a JSON Schema object.
+
+ See the [guide](/docs/guides/function-calling) for examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+
+class Tool(TypedDict, total=False):
+ function: Required[ToolFunction]
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+ """
+
+
+class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+ """
+
+
+CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
diff --git a/src/gradient/types/chat/completion_create_response.py b/src/gradient/types/chat/completion_create_response.py
new file mode 100644
index 00000000..13efee40
--- /dev/null
+++ b/src/gradient/types/chat/completion_create_response.py
@@ -0,0 +1,118 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.completion_usage import CompletionUsage
+from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+ "CompletionCreateResponse",
+ "Choice",
+ "ChoiceLogprobs",
+ "ChoiceMessage",
+ "ChoiceMessageToolCall",
+ "ChoiceMessageToolCallFunction",
+]
+
+
+class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice."""
+
+ content: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message content tokens with log probability information."""
+
+ refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message refusal tokens with log probability information."""
+
+
+class ChoiceMessageToolCallFunction(BaseModel):
+ """The function that the model called."""
+
+ arguments: str
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: str
+ """The name of the function to call."""
+
+
+class ChoiceMessageToolCall(BaseModel):
+ id: str
+ """The ID of the tool call."""
+
+ function: ChoiceMessageToolCallFunction
+ """The function that the model called."""
+
+ type: Literal["function"]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceMessage(BaseModel):
+ """A chat completion message generated by the model."""
+
+ content: Optional[str] = None
+ """The contents of the message."""
+
+ reasoning_content: Optional[str] = None
+ """The reasoning content generated by the model."""
+
+ refusal: Optional[str] = None
+ """The refusal message generated by the model."""
+
+ role: Literal["assistant"]
+ """The role of the author of this message."""
+
+ tool_calls: Optional[List[ChoiceMessageToolCall]] = None
+ """The tool calls generated by the model, such as function calls."""
+
+
+class Choice(BaseModel):
+ finish_reason: Literal["stop", "length", "tool_calls", "content_filter"]
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, or `length` if the maximum number of tokens specified in the request
+ was reached, `tool_calls` if the model called a tool, or `content_filter` if
+ content was omitted due to a content filter.
+ """
+
+ index: int
+ """The index of the choice in the list of choices."""
+
+ logprobs: Optional[ChoiceLogprobs] = None
+ """Log probability information for the choice."""
+
+ message: ChoiceMessage
+ """A chat completion message generated by the model."""
+
+
+class CompletionCreateResponse(BaseModel):
+ """
+ Represents a chat completion response returned by model, based on the provided input.
+ """
+
+ id: str
+ """A unique identifier for the chat completion."""
+
+ choices: List[Choice]
+ """A list of chat completion choices.
+
+ Can be more than one if `n` is greater than 1.
+ """
+
+ created: int
+ """The Unix timestamp (in seconds) of when the chat completion was created."""
+
+ model: str
+ """The model used for the chat completion."""
+
+ object: Literal["chat.completion"]
+ """The object type, which is always `chat.completion`."""
+
+ usage: Optional[CompletionUsage] = None
+ """Usage statistics for the completion request."""
diff --git a/src/gradient/types/databases/__init__.py b/src/gradient/types/databases/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/src/gradient/types/databases/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/src/gradient/types/databases/schema_registry/__init__.py b/src/gradient/types/databases/schema_registry/__init__.py
new file mode 100644
index 00000000..92c4e7a5
--- /dev/null
+++ b/src/gradient/types/databases/schema_registry/__init__.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .config_update_params import ConfigUpdateParams as ConfigUpdateParams
+from .config_update_response import ConfigUpdateResponse as ConfigUpdateResponse
+from .config_retrieve_response import ConfigRetrieveResponse as ConfigRetrieveResponse
+from .config_update_subject_params import ConfigUpdateSubjectParams as ConfigUpdateSubjectParams
+from .config_update_subject_response import ConfigUpdateSubjectResponse as ConfigUpdateSubjectResponse
+from .config_retrieve_subject_response import ConfigRetrieveSubjectResponse as ConfigRetrieveSubjectResponse
diff --git a/src/gradient/types/databases/schema_registry/config_retrieve_response.py b/src/gradient/types/databases/schema_registry/config_retrieve_response.py
new file mode 100644
index 00000000..583e4eec
--- /dev/null
+++ b/src/gradient/types/databases/schema_registry/config_retrieve_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigRetrieveResponse"]
+
+
+class ConfigRetrieveResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/gradient/types/databases/schema_registry/config_retrieve_subject_response.py b/src/gradient/types/databases/schema_registry/config_retrieve_subject_response.py
new file mode 100644
index 00000000..ec9fea68
--- /dev/null
+++ b/src/gradient/types/databases/schema_registry/config_retrieve_subject_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigRetrieveSubjectResponse"]
+
+
+class ConfigRetrieveSubjectResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
+
+ subject_name: str
+ """The name of the schema subject."""
diff --git a/src/gradient/types/databases/schema_registry/config_update_params.py b/src/gradient/types/databases/schema_registry/config_update_params.py
new file mode 100644
index 00000000..b25c7e92
--- /dev/null
+++ b/src/gradient/types/databases/schema_registry/config_update_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConfigUpdateParams"]
+
+
+class ConfigUpdateParams(TypedDict, total=False):
+ compatibility_level: Required[
+ Literal["NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"]
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/gradient/types/databases/schema_registry/config_update_response.py b/src/gradient/types/databases/schema_registry/config_update_response.py
new file mode 100644
index 00000000..0df776af
--- /dev/null
+++ b/src/gradient/types/databases/schema_registry/config_update_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigUpdateResponse"]
+
+
+class ConfigUpdateResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/gradient/types/databases/schema_registry/config_update_subject_params.py b/src/gradient/types/databases/schema_registry/config_update_subject_params.py
new file mode 100644
index 00000000..b935ba80
--- /dev/null
+++ b/src/gradient/types/databases/schema_registry/config_update_subject_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConfigUpdateSubjectParams"]
+
+
+class ConfigUpdateSubjectParams(TypedDict, total=False):
+ database_cluster_uuid: Required[str]
+
+ compatibility_level: Required[
+ Literal["NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"]
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/gradient/types/databases/schema_registry/config_update_subject_response.py b/src/gradient/types/databases/schema_registry/config_update_subject_response.py
new file mode 100644
index 00000000..3bb3cd24
--- /dev/null
+++ b/src/gradient/types/databases/schema_registry/config_update_subject_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigUpdateSubjectResponse"]
+
+
+class ConfigUpdateSubjectResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
+
+ subject_name: str
+ """The name of the schema subject."""
diff --git a/src/gradient/types/droplet_backup_policy.py b/src/gradient/types/droplet_backup_policy.py
new file mode 100644
index 00000000..63112e8f
--- /dev/null
+++ b/src/gradient/types/droplet_backup_policy.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["DropletBackupPolicy"]
+
+
+class DropletBackupPolicy(BaseModel):
+ hour: Optional[Literal[0, 4, 8, 12, 16, 20]] = None
+ """The hour of the day that the backup window will start."""
+
+ plan: Optional[Literal["daily", "weekly"]] = None
+ """The backup plan used for the Droplet.
+
+ The plan can be either `daily` or `weekly`.
+ """
+
+ retention_period_days: Optional[int] = None
+ """The number of days the backup will be retained."""
+
+ weekday: Optional[Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]] = None
+ """The day of the week on which the backup will occur."""
+
+ window_length_hours: Optional[int] = None
+ """The length of the backup window starting from `hour`."""
diff --git a/src/gradient/types/droplet_backup_policy_param.py b/src/gradient/types/droplet_backup_policy_param.py
new file mode 100644
index 00000000..802f057f
--- /dev/null
+++ b/src/gradient/types/droplet_backup_policy_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["DropletBackupPolicyParam"]
+
+
+class DropletBackupPolicyParam(TypedDict, total=False):
+ hour: Literal[0, 4, 8, 12, 16, 20]
+ """The hour of the day that the backup window will start."""
+
+ plan: Literal["daily", "weekly"]
+ """The backup plan used for the Droplet.
+
+ The plan can be either `daily` or `weekly`.
+ """
+
+ weekday: Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]
+ """The day of the week on which the backup will occur."""
diff --git a/src/gradient/types/gpu_droplet_create_params.py b/src/gradient/types/gpu_droplet_create_params.py
new file mode 100644
index 00000000..96403479
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_create_params.py
@@ -0,0 +1,214 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from .._types import SequenceNotStr
+from .droplet_backup_policy_param import DropletBackupPolicyParam
+
+__all__ = ["GPUDropletCreateParams", "DropletSingleCreate", "DropletMultiCreate"]
+
+
+class DropletSingleCreate(TypedDict, total=False):
+ image: Required[Union[str, int]]
+ """
+ The image ID of a public or private image or the slug identifier for a public
+ image. This image will be the base image for your Droplet. Requires `image:read`
+ scope.
+ """
+
+ name: Required[str]
+ """The human-readable string you wish to use when displaying the Droplet name.
+
+ The name, if set to a domain name managed in the DigitalOcean DNS management
+ system, will configure a PTR record for the Droplet. The name set during
+ creation will also determine the hostname for the Droplet in its internal
+ configuration.
+ """
+
+ size: Required[str]
+ """The slug identifier for the size that you wish to select for this Droplet."""
+
+ backup_policy: DropletBackupPolicyParam
+ """An object specifying the backup policy for the Droplet.
+
+ If omitted and `backups` is `true`, the backup plan will default to daily.
+ """
+
+ backups: bool
+ """
+ A boolean indicating whether automated backups should be enabled for the
+ Droplet.
+ """
+
+ ipv6: bool
+ """A boolean indicating whether to enable IPv6 on the Droplet."""
+
+ monitoring: bool
+ """A boolean indicating whether to install the DigitalOcean agent for monitoring."""
+
+ private_networking: bool
+ """This parameter has been deprecated.
+
+ Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no
+ `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC
+ for the region.
+ """
+
+ region: str
+ """The slug identifier for the region that you wish to deploy the Droplet in.
+
+ If the specific datacenter is not important, a slug prefix (e.g. `nyc`) can
+ be used to deploy the Droplet in any of that region's locations (`nyc1`,
+ `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+ the Droplet may deploy in any region.
+ """
+
+ ssh_keys: SequenceNotStr[Union[str, int]]
+ """
+ An array containing the IDs or fingerprints of the SSH keys that you wish to
+ embed in the Droplet's root account upon creation. You must add the keys to your
+ team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+ """
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to apply to the Droplet after it is
+ created.
+
+ Tag names can either be existing or new tags. Requires `tag:create` scope.
+ """
+
+ user_data: str
+ """
+ A string containing 'user data' which may be used to configure the Droplet on
+ first boot, often a 'cloud-config' file or Bash script. It must be plain text
+ and may not exceed 64 KiB in size.
+ """
+
+ volumes: SequenceNotStr[str]
+ """
+ An array of IDs for block storage volumes that will be attached to the Droplet
+ once created. The volumes must not already be attached to an existing Droplet.
+ Requires `block_storage:read` scope.
+ """
+
+ vpc_uuid: str
+ """A string specifying the UUID of the VPC to which the Droplet will be assigned.
+
+ If excluded, the Droplet will be assigned to your account's default VPC for the
+ region. Requires `vpc:read` scope.
+ """
+
+ with_droplet_agent: bool
+ """
+ A boolean indicating whether to install the DigitalOcean agent used for
+ providing access to the Droplet web console in the control panel. By default,
+ the agent is installed on new Droplets but installation errors (i.e. OS not
+ supported) are ignored. To prevent it from being installed, set to `false`. To
+ make installation errors fatal, explicitly set it to `true`.
+ """
+
+
+class DropletMultiCreate(TypedDict, total=False):
+ image: Required[Union[str, int]]
+ """
+ The image ID of a public or private image or the slug identifier for a public
+ image. This image will be the base image for your Droplet. Requires `image:read`
+ scope.
+ """
+
+ names: Required[SequenceNotStr[str]]
+ """
+ An array of human-readable strings you wish to use when displaying the
+ Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS
+ management system, will configure a PTR record for the Droplet. Each name set
+ during creation will also determine the hostname for the Droplet in its internal
+ configuration.
+ """
+
+ size: Required[str]
+ """The slug identifier for the size that you wish to select for this Droplet."""
+
+ backup_policy: DropletBackupPolicyParam
+ """An object specifying the backup policy for the Droplet.
+
+ If omitted and `backups` is `true`, the backup plan will default to daily.
+ """
+
+ backups: bool
+ """
+ A boolean indicating whether automated backups should be enabled for the
+ Droplet.
+ """
+
+ ipv6: bool
+ """A boolean indicating whether to enable IPv6 on the Droplet."""
+
+ monitoring: bool
+ """A boolean indicating whether to install the DigitalOcean agent for monitoring."""
+
+ private_networking: bool
+ """This parameter has been deprecated.
+
+ Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no
+ `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC
+ for the region.
+ """
+
+ region: str
+ """The slug identifier for the region that you wish to deploy the Droplet in.
+
+ If the specific datacenter is not important, a slug prefix (e.g. `nyc`) can
+ be used to deploy the Droplet in any of that region's locations (`nyc1`,
+ `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+ the Droplet may deploy in any region.
+ """
+
+ ssh_keys: SequenceNotStr[Union[str, int]]
+ """
+ An array containing the IDs or fingerprints of the SSH keys that you wish to
+ embed in the Droplet's root account upon creation. You must add the keys to your
+ team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+ """
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to apply to the Droplet after it is
+ created.
+
+ Tag names can either be existing or new tags. Requires `tag:create` scope.
+ """
+
+ user_data: str
+ """
+ A string containing 'user data' which may be used to configure the Droplet on
+ first boot, often a 'cloud-config' file or Bash script. It must be plain text
+ and may not exceed 64 KiB in size.
+ """
+
+ volumes: SequenceNotStr[str]
+ """
+ An array of IDs for block storage volumes that will be attached to the Droplet
+ once created. The volumes must not already be attached to an existing Droplet.
+ Requires `block_storage:read` scope.
+ """
+
+ vpc_uuid: str
+ """A string specifying the UUID of the VPC to which the Droplet will be assigned.
+
+ If excluded, the Droplet will be assigned to your account's default VPC for the
+ region. Requires `vpc:read` scope.
+ """
+
+ with_droplet_agent: bool
+ """
+ A boolean indicating whether to install the DigitalOcean agent used for
+ providing access to the Droplet web console in the control panel. By default,
+ the agent is installed on new Droplets but installation errors (i.e. OS not
+ supported) are ignored. To prevent it from being installed, set to `false`. To
+ make installation errors fatal, explicitly set it to `true`.
+ """
+
+
+GPUDropletCreateParams: TypeAlias = Union[DropletSingleCreate, DropletMultiCreate]
diff --git a/src/gradient/types/gpu_droplet_create_response.py b/src/gradient/types/gpu_droplet_create_response.py
new file mode 100644
index 00000000..72fafb96
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_create_response.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import TypeAlias
+
+from .._models import BaseModel
+from .shared.droplet import Droplet
+from .shared.action_link import ActionLink
+
+__all__ = [
+ "GPUDropletCreateResponse",
+ "SingleDropletResponse",
+ "SingleDropletResponseLinks",
+ "MultipleDropletResponse",
+ "MultipleDropletResponseLinks",
+]
+
+
+class SingleDropletResponseLinks(BaseModel):
+ actions: Optional[List[ActionLink]] = None
+
+
+class SingleDropletResponse(BaseModel):
+ droplet: Droplet
+
+ links: SingleDropletResponseLinks
+
+
+class MultipleDropletResponseLinks(BaseModel):
+ actions: Optional[List[ActionLink]] = None
+
+
+class MultipleDropletResponse(BaseModel):
+ droplets: List[Droplet]
+
+ links: MultipleDropletResponseLinks
+
+
+GPUDropletCreateResponse: TypeAlias = Union[SingleDropletResponse, MultipleDropletResponse]
diff --git a/src/gradient/types/gpu_droplet_delete_by_tag_params.py b/src/gradient/types/gpu_droplet_delete_by_tag_params.py
new file mode 100644
index 00000000..bc303125
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_delete_by_tag_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["GPUDropletDeleteByTagParams"]
+
+
+class GPUDropletDeleteByTagParams(TypedDict, total=False):
+ tag_name: Required[str]
+ """Specifies Droplets to be deleted by tag."""
diff --git a/src/gradient/types/gpu_droplet_list_firewalls_params.py b/src/gradient/types/gpu_droplet_list_firewalls_params.py
new file mode 100644
index 00000000..1f0111d8
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_firewalls_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["GPUDropletListFirewallsParams"]
+
+
+class GPUDropletListFirewallsParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplet_list_firewalls_response.py b/src/gradient/types/gpu_droplet_list_firewalls_response.py
new file mode 100644
index 00000000..617cdf98
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_firewalls_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.page_links import PageLinks
+from .gpu_droplets.firewall import Firewall
+from .shared.meta_properties import MetaProperties
+
+__all__ = ["GPUDropletListFirewallsResponse"]
+
+
+class GPUDropletListFirewallsResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ firewalls: Optional[List[Firewall]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplet_list_kernels_params.py b/src/gradient/types/gpu_droplet_list_kernels_params.py
new file mode 100644
index 00000000..7aa73225
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_kernels_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["GPUDropletListKernelsParams"]
+
+
+class GPUDropletListKernelsParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplet_list_kernels_response.py b/src/gradient/types/gpu_droplet_list_kernels_response.py
new file mode 100644
index 00000000..5fa9a355
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_kernels_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.kernel import Kernel
+from .shared.page_links import PageLinks
+from .shared.meta_properties import MetaProperties
+
+__all__ = ["GPUDropletListKernelsResponse"]
+
+
+class GPUDropletListKernelsResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ kernels: Optional[List[Optional[Kernel]]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplet_list_neighbors_response.py b/src/gradient/types/gpu_droplet_list_neighbors_response.py
new file mode 100644
index 00000000..cdfce3e0
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_neighbors_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.droplet import Droplet
+
+__all__ = ["GPUDropletListNeighborsResponse"]
+
+
+class GPUDropletListNeighborsResponse(BaseModel):
+ droplets: Optional[List[Droplet]] = None
diff --git a/src/gradient/types/gpu_droplet_list_params.py b/src/gradient/types/gpu_droplet_list_params.py
new file mode 100644
index 00000000..bf6eb793
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_params.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["GPUDropletListParams"]
+
+
+class GPUDropletListParams(TypedDict, total=False):
+ name: str
+ """Used to filter list response by Droplet name returning only exact matches.
+
+ It is case-insensitive and can not be combined with `tag_name`.
+ """
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ tag_name: str
+ """Used to filter Droplets by a specific tag.
+
+ Can not be combined with `name` or `type`. Requires `tag:read` scope.
+ """
+
+ type: Literal["droplets", "gpus"]
+ """When `type` is set to `gpus`, only GPU Droplets will be returned.
+
+ By default, only non-GPU Droplets are returned. Can not be combined with
+ `tag_name`.
+ """
diff --git a/src/gradient/types/gpu_droplet_list_response.py b/src/gradient/types/gpu_droplet_list_response.py
new file mode 100644
index 00000000..73e1e503
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.droplet import Droplet
+from .shared.page_links import PageLinks
+from .shared.meta_properties import MetaProperties
+
+__all__ = ["GPUDropletListResponse"]
+
+
+class GPUDropletListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ droplets: Optional[List[Droplet]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplet_list_snapshots_params.py b/src/gradient/types/gpu_droplet_list_snapshots_params.py
new file mode 100644
index 00000000..66e65a36
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_snapshots_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["GPUDropletListSnapshotsParams"]
+
+
+class GPUDropletListSnapshotsParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplet_list_snapshots_response.py b/src/gradient/types/gpu_droplet_list_snapshots_response.py
new file mode 100644
index 00000000..4b34d670
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_list_snapshots_response.py
@@ -0,0 +1,53 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .shared.page_links import PageLinks
+from .shared.meta_properties import MetaProperties
+
+__all__ = ["GPUDropletListSnapshotsResponse", "Snapshot"]
+
+
+class Snapshot(BaseModel):
+ id: int
+ """The unique identifier for the snapshot or backup."""
+
+ created_at: datetime
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the snapshot was created.
+ """
+
+ min_disk_size: int
+ """The minimum size in GB required for a volume or Droplet to use this snapshot."""
+
+ name: str
+ """A human-readable name for the snapshot."""
+
+ regions: List[str]
+ """An array of the regions that the snapshot is available in.
+
+ The regions are represented by their identifying slug values.
+ """
+
+ size_gigabytes: float
+ """The billable size of the snapshot in gigabytes."""
+
+ type: Literal["snapshot", "backup"]
+ """Describes the kind of image.
+
+ It may be one of `snapshot` or `backup`. This specifies whether an image is a
+ user-generated Droplet snapshot or automatically created Droplet backup.
+ """
+
+
+class GPUDropletListSnapshotsResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ links: Optional[PageLinks] = None
+
+ snapshots: Optional[List[Snapshot]] = None
diff --git a/src/gradient/types/gpu_droplet_retrieve_response.py b/src/gradient/types/gpu_droplet_retrieve_response.py
new file mode 100644
index 00000000..d8cc0f20
--- /dev/null
+++ b/src/gradient/types/gpu_droplet_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .shared.droplet import Droplet
+
+__all__ = ["GPUDropletRetrieveResponse"]
+
+
+class GPUDropletRetrieveResponse(BaseModel):
+ droplet: Optional[Droplet] = None
diff --git a/src/gradient/types/gpu_droplets/__init__.py b/src/gradient/types/gpu_droplets/__init__.py
new file mode 100644
index 00000000..c2f1835f
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/__init__.py
@@ -0,0 +1,104 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .domains import Domains as Domains
+from .firewall import Firewall as Firewall
+from .floating_ip import FloatingIP as FloatingIP
+from .lb_firewall import LbFirewall as LbFirewall
+from .glb_settings import GlbSettings as GlbSettings
+from .health_check import HealthCheck as HealthCheck
+from .domains_param import DomainsParam as DomainsParam
+from .load_balancer import LoadBalancer as LoadBalancer
+from .autoscale_pool import AutoscalePool as AutoscalePool
+from .firewall_param import FirewallParam as FirewallParam
+from .forwarding_rule import ForwardingRule as ForwardingRule
+from .sticky_sessions import StickySessions as StickySessions
+from .size_list_params import SizeListParams as SizeListParams
+from .image_list_params import ImageListParams as ImageListParams
+from .lb_firewall_param import LbFirewallParam as LbFirewallParam
+from .action_list_params import ActionListParams as ActionListParams
+from .backup_list_params import BackupListParams as BackupListParams
+from .glb_settings_param import GlbSettingsParam as GlbSettingsParam
+from .health_check_param import HealthCheckParam as HealthCheckParam
+from .size_list_response import SizeListResponse as SizeListResponse
+from .volume_list_params import VolumeListParams as VolumeListParams
+from .associated_resource import AssociatedResource as AssociatedResource
+from .current_utilization import CurrentUtilization as CurrentUtilization
+from .image_create_params import ImageCreateParams as ImageCreateParams
+from .image_list_response import ImageListResponse as ImageListResponse
+from .image_update_params import ImageUpdateParams as ImageUpdateParams
+from .action_list_response import ActionListResponse as ActionListResponse
+from .backup_list_response import BackupListResponse as BackupListResponse
+from .firewall_list_params import FirewallListParams as FirewallListParams
+from .snapshot_list_params import SnapshotListParams as SnapshotListParams
+from .volume_create_params import VolumeCreateParams as VolumeCreateParams
+from .volume_list_response import VolumeListResponse as VolumeListResponse
+from .autoscale_list_params import AutoscaleListParams as AutoscaleListParams
+from .forwarding_rule_param import ForwardingRuleParam as ForwardingRuleParam
+from .image_create_response import ImageCreateResponse as ImageCreateResponse
+from .image_update_response import ImageUpdateResponse as ImageUpdateResponse
+from .sticky_sessions_param import StickySessionsParam as StickySessionsParam
+from .action_initiate_params import ActionInitiateParams as ActionInitiateParams
+from .firewall_create_params import FirewallCreateParams as FirewallCreateParams
+from .firewall_list_response import FirewallListResponse as FirewallListResponse
+from .firewall_update_params import FirewallUpdateParams as FirewallUpdateParams
+from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse
+from .volume_create_response import VolumeCreateResponse as VolumeCreateResponse
+from .autoscale_create_params import AutoscaleCreateParams as AutoscaleCreateParams
+from .autoscale_list_response import AutoscaleListResponse as AutoscaleListResponse
+from .autoscale_update_params import AutoscaleUpdateParams as AutoscaleUpdateParams
+from .floating_ip_list_params import FloatingIPListParams as FloatingIPListParams
+from .image_retrieve_response import ImageRetrieveResponse as ImageRetrieveResponse
+from .action_initiate_response import ActionInitiateResponse as ActionInitiateResponse
+from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse
+from .firewall_create_response import FirewallCreateResponse as FirewallCreateResponse
+from .firewall_update_response import FirewallUpdateResponse as FirewallUpdateResponse
+from .volume_retrieve_response import VolumeRetrieveResponse as VolumeRetrieveResponse
+from .autoscale_create_response import AutoscaleCreateResponse as AutoscaleCreateResponse
+from .autoscale_update_response import AutoscaleUpdateResponse as AutoscaleUpdateResponse
+from .floating_ip_create_params import FloatingIPCreateParams as FloatingIPCreateParams
+from .floating_ip_list_response import FloatingIPListResponse as FloatingIPListResponse
+from .load_balancer_list_params import LoadBalancerListParams as LoadBalancerListParams
+from .firewall_retrieve_response import FirewallRetrieveResponse as FirewallRetrieveResponse
+from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse
+from .action_bulk_initiate_params import ActionBulkInitiateParams as ActionBulkInitiateParams
+from .autoscale_retrieve_response import AutoscaleRetrieveResponse as AutoscaleRetrieveResponse
+from .backup_list_policies_params import BackupListPoliciesParams as BackupListPoliciesParams
+from .floating_ip_create_response import FloatingIPCreateResponse as FloatingIPCreateResponse
+from .load_balancer_create_params import LoadBalancerCreateParams as LoadBalancerCreateParams
+from .load_balancer_list_response import LoadBalancerListResponse as LoadBalancerListResponse
+from .load_balancer_update_params import LoadBalancerUpdateParams as LoadBalancerUpdateParams
+from .autoscale_pool_static_config import AutoscalePoolStaticConfig as AutoscalePoolStaticConfig
+from .volume_delete_by_name_params import VolumeDeleteByNameParams as VolumeDeleteByNameParams
+from .action_bulk_initiate_response import ActionBulkInitiateResponse as ActionBulkInitiateResponse
+from .autoscale_list_history_params import AutoscaleListHistoryParams as AutoscaleListHistoryParams
+from .autoscale_list_members_params import AutoscaleListMembersParams as AutoscaleListMembersParams
+from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig as AutoscalePoolDynamicConfig
+from .backup_list_policies_response import BackupListPoliciesResponse as BackupListPoliciesResponse
+from .destroyed_associated_resource import DestroyedAssociatedResource as DestroyedAssociatedResource
+from .floating_ip_retrieve_response import FloatingIPRetrieveResponse as FloatingIPRetrieveResponse
+from .load_balancer_create_response import LoadBalancerCreateResponse as LoadBalancerCreateResponse
+from .load_balancer_update_response import LoadBalancerUpdateResponse as LoadBalancerUpdateResponse
+from .autoscale_list_history_response import AutoscaleListHistoryResponse as AutoscaleListHistoryResponse
+from .autoscale_list_members_response import AutoscaleListMembersResponse as AutoscaleListMembersResponse
+from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate as AutoscalePoolDropletTemplate
+from .backup_retrieve_policy_response import BackupRetrievePolicyResponse as BackupRetrievePolicyResponse
+from .load_balancer_retrieve_response import LoadBalancerRetrieveResponse as LoadBalancerRetrieveResponse
+from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam as AutoscalePoolStaticConfigParam
+from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam as AutoscalePoolDynamicConfigParam
+from .autoscale_pool_droplet_template_param import (
+ AutoscalePoolDropletTemplateParam as AutoscalePoolDropletTemplateParam,
+)
+from .backup_list_supported_policies_response import (
+ BackupListSupportedPoliciesResponse as BackupListSupportedPoliciesResponse,
+)
+from .destroy_with_associated_resource_list_response import (
+ DestroyWithAssociatedResourceListResponse as DestroyWithAssociatedResourceListResponse,
+)
+from .destroy_with_associated_resource_check_status_response import (
+ DestroyWithAssociatedResourceCheckStatusResponse as DestroyWithAssociatedResourceCheckStatusResponse,
+)
+from .destroy_with_associated_resource_delete_selective_params import (
+ DestroyWithAssociatedResourceDeleteSelectiveParams as DestroyWithAssociatedResourceDeleteSelectiveParams,
+)
diff --git a/src/gradient/types/gpu_droplets/account/__init__.py b/src/gradient/types/gpu_droplets/account/__init__.py
new file mode 100644
index 00000000..2d8a05ae
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/__init__.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .ssh_keys import SSHKeys as SSHKeys
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
diff --git a/src/gradient/types/gpu_droplets/account/key_create_params.py b/src/gradient/types/gpu_droplets/account/key_create_params.py
new file mode 100644
index 00000000..4e7c1cef
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/key_create_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """
+ A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+ """
+
+ public_key: Required[str]
+ """The entire public key string that was uploaded.
+
+ Embedded into the root user's `authorized_keys` file if you include this key
+ during Droplet creation.
+ """
diff --git a/src/gradient/types/gpu_droplets/account/key_create_response.py b/src/gradient/types/gpu_droplets/account/key_create_response.py
new file mode 100644
index 00000000..5ce63269
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/key_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .ssh_keys import SSHKeys
+from ...._models import BaseModel
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+ ssh_key: Optional[SSHKeys] = None
diff --git a/src/gradient/types/gpu_droplets/account/key_list_params.py b/src/gradient/types/gpu_droplets/account/key_list_params.py
new file mode 100644
index 00000000..44a455f3
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/account/key_list_response.py b/src/gradient/types/gpu_droplets/account/key_list_response.py
new file mode 100644
index 00000000..1151043e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/key_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .ssh_keys import SSHKeys
+from ...._models import BaseModel
+from ...shared.page_links import PageLinks
+from ...shared.meta_properties import MetaProperties
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ links: Optional[PageLinks] = None
+
+ ssh_keys: Optional[List[SSHKeys]] = None
diff --git a/src/gradient/types/gpu_droplets/account/key_retrieve_response.py b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py
new file mode 100644
index 00000000..da6e94d1
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .ssh_keys import SSHKeys
+from ...._models import BaseModel
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+ ssh_key: Optional[SSHKeys] = None
diff --git a/src/gradient/types/gpu_droplets/account/key_update_params.py b/src/gradient/types/gpu_droplets/account/key_update_params.py
new file mode 100644
index 00000000..e73d8b7b
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/key_update_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+ name: str
+ """
+ A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+ """
diff --git a/src/gradient/types/gpu_droplets/account/key_update_response.py b/src/gradient/types/gpu_droplets/account/key_update_response.py
new file mode 100644
index 00000000..54b81426
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/key_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .ssh_keys import SSHKeys
+from ...._models import BaseModel
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+ ssh_key: Optional[SSHKeys] = None
diff --git a/src/gradient/types/gpu_droplets/account/ssh_keys.py b/src/gradient/types/gpu_droplets/account/ssh_keys.py
new file mode 100644
index 00000000..8112c18a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/ssh_keys.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["SSHKeys"]
+
+
+class SSHKeys(BaseModel):
+ name: str
+ """
+ A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+ """
+
+ public_key: str
+ """The entire public key string that was uploaded.
+
+ Embedded into the root user's `authorized_keys` file if you include this key
+ during Droplet creation.
+ """
+
+ id: Optional[int] = None
+ """A unique identification number for this key.
+
+ Can be used to embed a specific SSH key into a Droplet.
+ """
+
+ fingerprint: Optional[str] = None
+ """
+ A unique identifier that differentiates this key from other keys using a format
+ that SSH recognizes. The fingerprint is created when the key is added to your
+ account.
+ """
diff --git a/src/gradient/types/gpu_droplets/action_bulk_initiate_params.py b/src/gradient/types/gpu_droplets/action_bulk_initiate_params.py
new file mode 100644
index 00000000..a6402096
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/action_bulk_initiate_params.py
@@ -0,0 +1,72 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = ["ActionBulkInitiateParams", "DropletAction", "DropletActionSnapshot"]
+
+
+class DropletAction(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ tag_name: str
+ """Used to filter Droplets by a specific tag.
+
+ Can not be combined with `name` or `type`. Requires `tag:read` scope.
+ """
+
+
+class DropletActionSnapshot(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ tag_name: str
+ """Used to filter Droplets by a specific tag.
+
+ Can not be combined with `name` or `type`. Requires `tag:read` scope.
+ """
+
+ name: str
+ """The name to give the new snapshot of the Droplet."""
+
+
+ActionBulkInitiateParams: TypeAlias = Union[DropletAction, DropletActionSnapshot]
diff --git a/src/gradient/types/gpu_droplets/action_bulk_initiate_response.py b/src/gradient/types/gpu_droplets/action_bulk_initiate_response.py
new file mode 100644
index 00000000..905860d7
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/action_bulk_initiate_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.action import Action
+
+__all__ = ["ActionBulkInitiateResponse"]
+
+
+class ActionBulkInitiateResponse(BaseModel):
+ actions: Optional[List[Action]] = None
diff --git a/src/gradient/types/gpu_droplets/action_initiate_params.py b/src/gradient/types/gpu_droplets/action_initiate_params.py
new file mode 100644
index 00000000..f0ef6b1e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/action_initiate_params.py
@@ -0,0 +1,278 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..droplet_backup_policy_param import DropletBackupPolicyParam
+
+__all__ = [
+ "ActionInitiateParams",
+ "DropletAction",
+ "DropletActionEnableBackups",
+ "DropletActionChangeBackupPolicy",
+ "DropletActionRestore",
+ "DropletActionResize",
+ "DropletActionRebuild",
+ "DropletActionRename",
+ "DropletActionChangeKernel",
+ "DropletActionSnapshot",
+]
+
+
+class DropletAction(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+
+class DropletActionEnableBackups(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ backup_policy: DropletBackupPolicyParam
+ """An object specifying the backup policy for the Droplet.
+
+ If omitted, the backup plan will default to daily.
+ """
+
+
+class DropletActionChangeBackupPolicy(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ backup_policy: DropletBackupPolicyParam
+ """An object specifying the backup policy for the Droplet."""
+
+
+class DropletActionRestore(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ image: int
+ """The ID of a backup of the current Droplet instance to restore from."""
+
+
+class DropletActionResize(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ disk: bool
+ """When `true`, the Droplet's disk will be resized in addition to its RAM and CPU.
+
+ This is a permanent change and cannot be reversed as a Droplet's disk size
+ cannot be decreased.
+ """
+
+ size: str
+ """The slug identifier for the size to which you wish to resize the Droplet."""
+
+
+class DropletActionRebuild(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ image: Union[str, int]
+ """
+ The image ID of a public or private image or the slug identifier for a public
+ image. The Droplet will be rebuilt using this image as its base.
+ """
+
+
+class DropletActionRename(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ name: str
+ """The new name for the Droplet."""
+
+
+class DropletActionChangeKernel(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ kernel: int
+ """A unique number used to identify and reference a specific kernel."""
+
+
+class DropletActionSnapshot(TypedDict, total=False):
+ type: Required[
+ Literal[
+ "enable_backups",
+ "disable_backups",
+ "reboot",
+ "power_cycle",
+ "shutdown",
+ "power_off",
+ "power_on",
+ "restore",
+ "password_reset",
+ "resize",
+ "rebuild",
+ "rename",
+ "change_kernel",
+ "enable_ipv6",
+ "snapshot",
+ ]
+ ]
+ """The type of action to initiate for the Droplet."""
+
+ name: str
+ """The name to give the new snapshot of the Droplet."""
+
+
+ActionInitiateParams: TypeAlias = Union[
+ DropletAction,
+ DropletActionEnableBackups,
+ DropletActionChangeBackupPolicy,
+ DropletActionRestore,
+ DropletActionResize,
+ DropletActionRebuild,
+ DropletActionRename,
+ DropletActionChangeKernel,
+ DropletActionSnapshot,
+]
diff --git a/src/gradient/types/gpu_droplets/action_initiate_response.py b/src/gradient/types/gpu_droplets/action_initiate_response.py
new file mode 100644
index 00000000..087781d1
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/action_initiate_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..shared.action import Action
+
+__all__ = ["ActionInitiateResponse"]
+
+
+class ActionInitiateResponse(BaseModel):
+ action: Optional[Action] = None
diff --git a/src/gradient/types/gpu_droplets/action_list_params.py b/src/gradient/types/gpu_droplets/action_list_params.py
new file mode 100644
index 00000000..dd873288
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/action_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ActionListParams"]
+
+
+class ActionListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/action_list_response.py b/src/gradient/types/gpu_droplets/action_list_response.py
new file mode 100644
index 00000000..1a20f780
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/action_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.action import Action
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["ActionListResponse"]
+
+
+class ActionListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ actions: Optional[List[Action]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/action_retrieve_response.py b/src/gradient/types/gpu_droplets/action_retrieve_response.py
new file mode 100644
index 00000000..3856228d
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/action_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..shared.action import Action
+
+__all__ = ["ActionRetrieveResponse"]
+
+
+class ActionRetrieveResponse(BaseModel):
+ action: Optional[Action] = None
diff --git a/src/gradient/types/gpu_droplets/associated_resource.py b/src/gradient/types/gpu_droplets/associated_resource.py
new file mode 100644
index 00000000..500c69e2
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/associated_resource.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["AssociatedResource"]
+
+
+class AssociatedResource(BaseModel):
+ """An objects containing information about a resource associated with a Droplet."""
+
+ id: Optional[str] = None
+ """The unique identifier for the resource associated with the Droplet."""
+
+ cost: Optional[str] = None
+ """
+ The cost of the resource in USD per month if the resource is retained after the
+ Droplet is destroyed.
+ """
+
+ name: Optional[str] = None
+ """The name of the resource associated with the Droplet."""
diff --git a/src/gradient/types/gpu_droplets/autoscale_create_params.py b/src/gradient/types/gpu_droplets/autoscale_create_params.py
new file mode 100644
index 00000000..0f3c05a6
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_create_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam
+from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam
+from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam
+
+__all__ = ["AutoscaleCreateParams", "Config"]
+
+
+class AutoscaleCreateParams(TypedDict, total=False):
+ config: Required[Config]
+ """
+ The scaling configuration for an autoscale pool, which is how the pool scales up
+ and down (either by resource utilization or static configuration).
+ """
+
+ droplet_template: Required[AutoscalePoolDropletTemplateParam]
+
+ name: Required[str]
+ """The human-readable name of the autoscale pool. This field cannot be updated"""
+
+
+Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam]
diff --git a/src/gradient/types/gpu_droplets/autoscale_create_response.py b/src/gradient/types/gpu_droplets/autoscale_create_response.py
new file mode 100644
index 00000000..819297e9
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .autoscale_pool import AutoscalePool
+
+__all__ = ["AutoscaleCreateResponse"]
+
+
+class AutoscaleCreateResponse(BaseModel):
+ autoscale_pool: Optional[AutoscalePool] = None
diff --git a/src/gradient/types/gpu_droplets/autoscale_list_history_params.py b/src/gradient/types/gpu_droplets/autoscale_list_history_params.py
new file mode 100644
index 00000000..f837a11e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_list_history_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AutoscaleListHistoryParams"]
+
+
+class AutoscaleListHistoryParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/autoscale_list_history_response.py b/src/gradient/types/gpu_droplets/autoscale_list_history_response.py
new file mode 100644
index 00000000..843f44d8
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_list_history_response.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["AutoscaleListHistoryResponse", "History"]
+
+
+class History(BaseModel):
+ created_at: datetime
+ """
+ The creation time of the history event in ISO8601 combined date and time format.
+ """
+
+ current_instance_count: int
+ """The current number of Droplets in the autoscale pool."""
+
+ desired_instance_count: int
+ """The target number of Droplets for the autoscale pool after the scaling event."""
+
+ history_event_id: str
+ """The unique identifier of the history event."""
+
+ reason: Literal["CONFIGURATION_CHANGE", "SCALE_UP", "SCALE_DOWN"]
+ """The reason for the scaling event."""
+
+ status: Literal["in_progress", "success", "error"]
+ """The status of the scaling event."""
+
+ updated_at: datetime
+ """
+ The last updated time of the history event in ISO8601 combined date and time
+ format.
+ """
+
+
+class AutoscaleListHistoryResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ history: Optional[List[History]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/autoscale_list_members_params.py b/src/gradient/types/gpu_droplets/autoscale_list_members_params.py
new file mode 100644
index 00000000..5a7f738d
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_list_members_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AutoscaleListMembersParams"]
+
+
+class AutoscaleListMembersParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/autoscale_list_members_response.py b/src/gradient/types/gpu_droplets/autoscale_list_members_response.py
new file mode 100644
index 00000000..337ac4e3
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_list_members_response.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["AutoscaleListMembersResponse", "Droplet", "DropletCurrentUtilization"]
+
+
+class DropletCurrentUtilization(BaseModel):
+ cpu: Optional[float] = None
+ """The CPU utilization average of the individual Droplet."""
+
+ memory: Optional[float] = None
+ """The memory utilization average of the individual Droplet."""
+
+
+class Droplet(BaseModel):
+ created_at: datetime
+ """The creation time of the Droplet in ISO8601 combined date and time format."""
+
+ current_utilization: DropletCurrentUtilization
+
+ droplet_id: int
+ """The unique identifier of the Droplet."""
+
+ health_status: str
+ """The health status of the Droplet."""
+
+ status: Literal["provisioning", "active", "deleting", "off"]
+ """The power status of the Droplet."""
+
+ updated_at: datetime
+ """The last updated time of the Droplet in ISO8601 combined date and time format."""
+
+
+class AutoscaleListMembersResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ droplets: Optional[List[Droplet]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/autoscale_list_params.py b/src/gradient/types/gpu_droplets/autoscale_list_params.py
new file mode 100644
index 00000000..3a35e616
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_list_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AutoscaleListParams"]
+
+
+class AutoscaleListParams(TypedDict, total=False):
+ name: str
+ """The name of the autoscale pool"""
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/autoscale_list_response.py b/src/gradient/types/gpu_droplets/autoscale_list_response.py
new file mode 100644
index 00000000..807cb17f
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .autoscale_pool import AutoscalePool
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["AutoscaleListResponse"]
+
+
+class AutoscaleListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ autoscale_pools: Optional[List[AutoscalePool]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/autoscale_pool.py b/src/gradient/types/gpu_droplets/autoscale_pool.py
new file mode 100644
index 00000000..2964319e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_pool.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from datetime import datetime
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from .current_utilization import CurrentUtilization
+from .autoscale_pool_static_config import AutoscalePoolStaticConfig
+from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig
+from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate
+
+__all__ = ["AutoscalePool", "Config"]
+
+Config: TypeAlias = Union[AutoscalePoolStaticConfig, AutoscalePoolDynamicConfig]
+
+
+class AutoscalePool(BaseModel):
+ id: str
+ """A unique identifier for each autoscale pool instance.
+
+ This is automatically generated upon autoscale pool creation.
+ """
+
+ active_resources_count: int
+ """The number of active Droplets in the autoscale pool."""
+
+ config: Config
+ """
+ The scaling configuration for an autoscale pool, which is how the pool scales up
+ and down (either by resource utilization or static configuration).
+ """
+
+ created_at: datetime
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the autoscale pool was created.
+ """
+
+ droplet_template: AutoscalePoolDropletTemplate
+
+ name: str
+ """The human-readable name set for the autoscale pool."""
+
+ status: Literal["active", "deleting", "error"]
+ """The current status of the autoscale pool."""
+
+ updated_at: datetime
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the autoscale pool was last updated.
+ """
+
+ current_utilization: Optional[CurrentUtilization] = None
diff --git a/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template.py
new file mode 100644
index 00000000..2ab2036b
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template.py
@@ -0,0 +1,69 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["AutoscalePoolDropletTemplate"]
+
+
+class AutoscalePoolDropletTemplate(BaseModel):
+ image: str
+ """The Droplet image to be used for all Droplets in the autoscale pool.
+
+ You may specify the slug or the image ID.
+ """
+
+ region: Literal[
+ "nyc1", "nyc2", "nyc3", "ams2", "ams3", "sfo1", "sfo2", "sfo3", "sgp1", "lon1", "fra1", "tor1", "blr1", "syd1"
+ ]
+ """The datacenter in which all of the Droplets will be created."""
+
+ size: str
+ """The Droplet size to be used for all Droplets in the autoscale pool."""
+
+ ssh_keys: List[str]
+ """The SSH keys to be installed on the Droplets in the autoscale pool.
+
+ You can either specify the key ID or the fingerprint. Requires `ssh_key:read`
+ scope.
+ """
+
+ ipv6: Optional[bool] = None
+ """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool."""
+
+ name: Optional[str] = None
+ """The name(s) to be applied to all Droplets in the autoscale pool."""
+
+ project_id: Optional[str] = None
+ """
+ The project that the Droplets in the autoscale pool will belong to. Requires
+ `project:read` scope.
+ """
+
+ tags: Optional[List[str]] = None
+ """
+ The tags to apply to each of the Droplets in the autoscale pool. Requires
+ `tag:read` scope.
+ """
+
+ user_data: Optional[str] = None
+ """
+ A string containing user data that cloud-init consumes to configure a Droplet on
+ first boot. User data is often a cloud-config file or Bash script. It must be
+ plain text and may not exceed 64 KiB in size.
+ """
+
+ vpc_uuid: Optional[str] = None
+ """The VPC where the Droplets in the autoscale pool will be created.
+
+ The VPC must be in the region where you want to create the Droplets. Requires
+ `vpc:read` scope.
+ """
+
+ with_droplet_agent: Optional[bool] = None
+ """Installs the Droplet agent.
+
+ This must be set to true to monitor Droplets for resource utilization scaling.
+ """
diff --git a/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py
new file mode 100644
index 00000000..3eb8ac89
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py
@@ -0,0 +1,85 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["AutoscalePoolDropletTemplateParam"]
+
+
+class AutoscalePoolDropletTemplateParam(TypedDict, total=False):
+ image: Required[str]
+ """The Droplet image to be used for all Droplets in the autoscale pool.
+
+ You may specify the slug or the image ID.
+ """
+
+ region: Required[
+ Literal[
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "ams2",
+ "ams3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "lon1",
+ "fra1",
+ "tor1",
+ "blr1",
+ "syd1",
+ ]
+ ]
+ """The datacenter in which all of the Droplets will be created."""
+
+ size: Required[str]
+ """The Droplet size to be used for all Droplets in the autoscale pool."""
+
+ ssh_keys: Required[SequenceNotStr[str]]
+ """The SSH keys to be installed on the Droplets in the autoscale pool.
+
+ You can either specify the key ID or the fingerprint. Requires `ssh_key:read`
+ scope.
+ """
+
+ ipv6: bool
+ """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool."""
+
+ name: str
+ """The name(s) to be applied to all Droplets in the autoscale pool."""
+
+ project_id: str
+ """
+ The project that the Droplets in the autoscale pool will belong to. Requires
+ `project:read` scope.
+ """
+
+ tags: SequenceNotStr[str]
+ """
+ The tags to apply to each of the Droplets in the autoscale pool. Requires
+ `tag:read` scope.
+ """
+
+ user_data: str
+ """
+ A string containing user data that cloud-init consumes to configure a Droplet on
+ first boot. User data is often a cloud-config file or Bash script. It must be
+ plain text and may not exceed 64 KiB in size.
+ """
+
+ vpc_uuid: str
+ """The VPC where the Droplets in the autoscale pool will be created.
+
+ The VPC must be in the region where you want to create the Droplets. Requires
+ `vpc:read` scope.
+ """
+
+ with_droplet_agent: bool
+ """Installs the Droplet agent.
+
+ This must be set to true to monitor Droplets for resource utilization scaling.
+ """
diff --git a/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config.py
new file mode 100644
index 00000000..10f9781b
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["AutoscalePoolDynamicConfig"]
+
+
+class AutoscalePoolDynamicConfig(BaseModel):
+ max_instances: int
+ """The maximum number of Droplets in an autoscale pool."""
+
+ min_instances: int
+ """The minimum number of Droplets in an autoscale pool."""
+
+ cooldown_minutes: Optional[int] = None
+ """The number of minutes to wait between scaling events in an autoscale pool.
+
+ Defaults to 10 minutes.
+ """
+
+ target_cpu_utilization: Optional[float] = None
+ """Target CPU utilization as a decimal."""
+
+ target_memory_utilization: Optional[float] = None
+ """Target memory utilization as a decimal."""
diff --git a/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
new file mode 100644
index 00000000..af06e73a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["AutoscalePoolDynamicConfigParam"]
+
+
+class AutoscalePoolDynamicConfigParam(TypedDict, total=False):
+ max_instances: Required[int]
+ """The maximum number of Droplets in an autoscale pool."""
+
+ min_instances: Required[int]
+ """The minimum number of Droplets in an autoscale pool."""
+
+ cooldown_minutes: int
+ """The number of minutes to wait between scaling events in an autoscale pool.
+
+ Defaults to 10 minutes.
+ """
+
+ target_cpu_utilization: float
+ """Target CPU utilization as a decimal."""
+
+ target_memory_utilization: float
+ """Target memory utilization as a decimal."""
diff --git a/src/gradient/types/gpu_droplets/autoscale_pool_static_config.py b/src/gradient/types/gpu_droplets/autoscale_pool_static_config.py
new file mode 100644
index 00000000..cc891007
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_pool_static_config.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["AutoscalePoolStaticConfig"]
+
+
+class AutoscalePoolStaticConfig(BaseModel):
+ target_number_instances: int
+ """Fixed number of instances in an autoscale pool."""
diff --git a/src/gradient/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/gradient/types/gpu_droplets/autoscale_pool_static_config_param.py
new file mode 100644
index 00000000..a7510d22
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_pool_static_config_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["AutoscalePoolStaticConfigParam"]
+
+
+class AutoscalePoolStaticConfigParam(TypedDict, total=False):
+ target_number_instances: Required[int]
+ """Fixed number of instances in an autoscale pool."""
diff --git a/src/gradient/types/gpu_droplets/autoscale_retrieve_response.py b/src/gradient/types/gpu_droplets/autoscale_retrieve_response.py
new file mode 100644
index 00000000..f383ed03
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .autoscale_pool import AutoscalePool
+
+__all__ = ["AutoscaleRetrieveResponse"]
+
+
+class AutoscaleRetrieveResponse(BaseModel):
+ autoscale_pool: Optional[AutoscalePool] = None
diff --git a/src/gradient/types/gpu_droplets/autoscale_update_params.py b/src/gradient/types/gpu_droplets/autoscale_update_params.py
new file mode 100644
index 00000000..1b96af1e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_update_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam
+from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam
+from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam
+
+__all__ = ["AutoscaleUpdateParams", "Config"]
+
+
+class AutoscaleUpdateParams(TypedDict, total=False):
+ config: Required[Config]
+ """
+ The scaling configuration for an autoscale pool, which is how the pool scales up
+ and down (either by resource utilization or static configuration).
+ """
+
+ droplet_template: Required[AutoscalePoolDropletTemplateParam]
+
+ name: Required[str]
+ """The human-readable name of the autoscale pool. This field cannot be updated"""
+
+
+Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam]
diff --git a/src/gradient/types/gpu_droplets/autoscale_update_response.py b/src/gradient/types/gpu_droplets/autoscale_update_response.py
new file mode 100644
index 00000000..09dde2a4
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/autoscale_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .autoscale_pool import AutoscalePool
+
+__all__ = ["AutoscaleUpdateResponse"]
+
+
+class AutoscaleUpdateResponse(BaseModel):
+ autoscale_pool: Optional[AutoscalePool] = None
diff --git a/src/gradient/types/gpu_droplets/backup_list_params.py b/src/gradient/types/gpu_droplets/backup_list_params.py
new file mode 100644
index 00000000..66fe92aa
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/backup_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["BackupListParams"]
+
+
+class BackupListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/backup_list_policies_params.py b/src/gradient/types/gpu_droplets/backup_list_policies_params.py
new file mode 100644
index 00000000..0cdb0ddb
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/backup_list_policies_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["BackupListPoliciesParams"]
+
+
+class BackupListPoliciesParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/backup_list_policies_response.py b/src/gradient/types/gpu_droplets/backup_list_policies_response.py
new file mode 100644
index 00000000..73aa9458
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/backup_list_policies_response.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from ..._models import BaseModel
+from ..shared.page_links import PageLinks
+from ..droplet_backup_policy import DropletBackupPolicy
+from ..shared.meta_properties import MetaProperties
+from ..shared.droplet_next_backup_window import DropletNextBackupWindow
+
+__all__ = ["BackupListPoliciesResponse", "Policies"]
+
+
+class Policies(BaseModel):
+ backup_enabled: Optional[bool] = None
+ """A boolean value indicating whether backups are enabled for the Droplet."""
+
+ backup_policy: Optional[DropletBackupPolicy] = None
+ """An object specifying the backup policy for the Droplet."""
+
+ droplet_id: Optional[int] = None
+ """The unique identifier for the Droplet."""
+
+ next_backup_window: Optional[DropletNextBackupWindow] = None
+ """
+ An object containing keys with the start and end times of the window during
+ which the backup will occur.
+ """
+
+
+class BackupListPoliciesResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ links: Optional[PageLinks] = None
+
+ policies: Optional[Dict[str, Policies]] = None
+ """
+ A map where the keys are the Droplet IDs and the values are objects containing
+ the backup policy information for each Droplet.
+ """
diff --git a/src/gradient/types/gpu_droplets/backup_list_response.py b/src/gradient/types/gpu_droplets/backup_list_response.py
new file mode 100644
index 00000000..c96d573a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/backup_list_response.py
@@ -0,0 +1,53 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["BackupListResponse", "Backup"]
+
+
+class Backup(BaseModel):
+ id: int
+ """The unique identifier for the snapshot or backup."""
+
+ created_at: datetime
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the snapshot was created.
+ """
+
+ min_disk_size: int
+ """The minimum size in GB required for a volume or Droplet to use this snapshot."""
+
+ name: str
+ """A human-readable name for the snapshot."""
+
+ regions: List[str]
+ """An array of the regions that the snapshot is available in.
+
+ The regions are represented by their identifying slug values.
+ """
+
+ size_gigabytes: float
+ """The billable size of the snapshot in gigabytes."""
+
+ type: Literal["snapshot", "backup"]
+ """Describes the kind of image.
+
+ It may be one of `snapshot` or `backup`. This specifies whether an image is a
+ user-generated Droplet snapshot or automatically created Droplet backup.
+ """
+
+
+class BackupListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ backups: Optional[List[Backup]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/backup_list_supported_policies_response.py b/src/gradient/types/gpu_droplets/backup_list_supported_policies_response.py
new file mode 100644
index 00000000..219cfc34
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/backup_list_supported_policies_response.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["BackupListSupportedPoliciesResponse", "SupportedPolicy"]
+
+
+class SupportedPolicy(BaseModel):
+ name: Optional[str] = None
+ """The name of the Droplet backup plan."""
+
+ possible_days: Optional[List[str]] = None
+ """The day of the week the backup will occur."""
+
+ possible_window_starts: Optional[List[int]] = None
+ """An array of integers representing the hours of the day that a backup can start."""
+
+ retention_period_days: Optional[int] = None
+ """The number of days that a backup will be kept."""
+
+ window_length_hours: Optional[int] = None
+ """The number of hours that a backup window is open."""
+
+
+class BackupListSupportedPoliciesResponse(BaseModel):
+ supported_policies: Optional[List[SupportedPolicy]] = None
diff --git a/src/gradient/types/gpu_droplets/backup_retrieve_policy_response.py b/src/gradient/types/gpu_droplets/backup_retrieve_policy_response.py
new file mode 100644
index 00000000..38288dea
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/backup_retrieve_policy_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..droplet_backup_policy import DropletBackupPolicy
+from ..shared.droplet_next_backup_window import DropletNextBackupWindow
+
+__all__ = ["BackupRetrievePolicyResponse", "Policy"]
+
+
+class Policy(BaseModel):
+ backup_enabled: Optional[bool] = None
+ """A boolean value indicating whether backups are enabled for the Droplet."""
+
+ backup_policy: Optional[DropletBackupPolicy] = None
+ """An object specifying the backup policy for the Droplet."""
+
+ droplet_id: Optional[int] = None
+ """The unique identifier for the Droplet."""
+
+ next_backup_window: Optional[DropletNextBackupWindow] = None
+ """
+ An object containing keys with the start and end times of the window during
+ which the backup will occur.
+ """
+
+
+class BackupRetrievePolicyResponse(BaseModel):
+ policy: Optional[Policy] = None
diff --git a/src/gradient/types/gpu_droplets/current_utilization.py b/src/gradient/types/gpu_droplets/current_utilization.py
new file mode 100644
index 00000000..f2cb0b6c
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/current_utilization.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["CurrentUtilization"]
+
+
+class CurrentUtilization(BaseModel):
+ cpu: Optional[float] = None
+ """The average CPU utilization of the autoscale pool."""
+
+ memory: Optional[float] = None
+ """The average memory utilization of the autoscale pool."""
diff --git a/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
new file mode 100644
index 00000000..8dd32c14
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+from .destroyed_associated_resource import DestroyedAssociatedResource
+
+__all__ = ["DestroyWithAssociatedResourceCheckStatusResponse", "Resources"]
+
+
+class Resources(BaseModel):
+ """
+    An object containing additional information about a resource related to a Droplet requested to be destroyed.
+ """
+
+ floating_ips: Optional[List[DestroyedAssociatedResource]] = None
+
+ reserved_ips: Optional[List[DestroyedAssociatedResource]] = None
+
+ snapshots: Optional[List[DestroyedAssociatedResource]] = None
+
+ volume_snapshots: Optional[List[DestroyedAssociatedResource]] = None
+
+ volumes: Optional[List[DestroyedAssociatedResource]] = None
+
+
+class DestroyWithAssociatedResourceCheckStatusResponse(BaseModel):
+    """An object containing information about resources scheduled for deletion."""
+
+ completed_at: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format indicating when the
+ requested action was completed.
+ """
+
+ droplet: Optional[DestroyedAssociatedResource] = None
+ """An object containing information about a resource scheduled for deletion."""
+
+ failures: Optional[int] = None
+ """A count of the associated resources that failed to be destroyed, if any."""
+
+ resources: Optional[Resources] = None
+ """
+    An object containing additional information about a resource related to a Droplet
+ requested to be destroyed.
+ """
diff --git a/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
new file mode 100644
index 00000000..9a9730e7
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["DestroyWithAssociatedResourceDeleteSelectiveParams"]
+
+
+class DestroyWithAssociatedResourceDeleteSelectiveParams(TypedDict, total=False):
+ floating_ips: SequenceNotStr[str]
+ """
+ An array of unique identifiers for the floating IPs to be scheduled for
+ deletion.
+ """
+
+ reserved_ips: SequenceNotStr[str]
+ """
+ An array of unique identifiers for the reserved IPs to be scheduled for
+ deletion.
+ """
+
+ snapshots: SequenceNotStr[str]
+ """An array of unique identifiers for the snapshots to be scheduled for deletion."""
+
+ volume_snapshots: SequenceNotStr[str]
+ """
+ An array of unique identifiers for the volume snapshots to be scheduled for
+ deletion.
+ """
+
+ volumes: SequenceNotStr[str]
+ """An array of unique identifiers for the volumes to be scheduled for deletion."""
diff --git a/src/gradient/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_list_response.py
new file mode 100644
index 00000000..ef4c6c99
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_list_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .associated_resource import AssociatedResource
+
+__all__ = ["DestroyWithAssociatedResourceListResponse"]
+
+
+class DestroyWithAssociatedResourceListResponse(BaseModel):
+ floating_ips: Optional[List[AssociatedResource]] = None
+ """
+ Floating IPs that are associated with this Droplet. Requires `reserved_ip:read`
+ scope.
+ """
+
+ reserved_ips: Optional[List[AssociatedResource]] = None
+ """
+ Reserved IPs that are associated with this Droplet. Requires `reserved_ip:read`
+ scope.
+ """
+
+ snapshots: Optional[List[AssociatedResource]] = None
+ """Snapshots that are associated with this Droplet. Requires `image:read` scope."""
+
+ volume_snapshots: Optional[List[AssociatedResource]] = None
+ """
+ Volume Snapshots that are associated with this Droplet. Requires
+ `block_storage_snapshot:read` scope.
+ """
+
+ volumes: Optional[List[AssociatedResource]] = None
+ """
+ Volumes that are associated with this Droplet. Requires `block_storage:read`
+ scope.
+ """
diff --git a/src/gradient/types/gpu_droplets/destroyed_associated_resource.py b/src/gradient/types/gpu_droplets/destroyed_associated_resource.py
new file mode 100644
index 00000000..fd3784e4
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/destroyed_associated_resource.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["DestroyedAssociatedResource"]
+
+
+class DestroyedAssociatedResource(BaseModel):
+ """An object containing information about a resource scheduled for deletion."""
+
+ id: Optional[str] = None
+ """The unique identifier for the resource scheduled for deletion."""
+
+ destroyed_at: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format indicating when the
+ resource was destroyed if the request was successful.
+ """
+
+ error_message: Optional[str] = None
+ """
+ A string indicating that the resource was not successfully destroyed and
+ providing additional information.
+ """
+
+ name: Optional[str] = None
+ """The name of the resource scheduled for deletion."""
diff --git a/src/gradient/types/gpu_droplets/domains.py b/src/gradient/types/gpu_droplets/domains.py
new file mode 100644
index 00000000..14d4a0bb
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/domains.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["Domains"]
+
+
+class Domains(BaseModel):
+ """An object specifying domain configurations for a Global load balancer."""
+
+ certificate_id: Optional[str] = None
+ """The ID of the TLS certificate used for SSL termination."""
+
+ is_managed: Optional[bool] = None
+ """A boolean value indicating if the domain is already managed by DigitalOcean.
+
+ If true, all A and AAAA records required to enable Global load balancers will be
+ automatically added.
+ """
+
+ name: Optional[str] = None
+ """FQDN to associate with a Global load balancer."""
diff --git a/src/gradient/types/gpu_droplets/domains_param.py b/src/gradient/types/gpu_droplets/domains_param.py
new file mode 100644
index 00000000..44481775
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/domains_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["DomainsParam"]
+
+
+class DomainsParam(TypedDict, total=False):
+ """An object specifying domain configurations for a Global load balancer."""
+
+ certificate_id: str
+ """The ID of the TLS certificate used for SSL termination."""
+
+ is_managed: bool
+ """A boolean value indicating if the domain is already managed by DigitalOcean.
+
+ If true, all A and AAAA records required to enable Global load balancers will be
+ automatically added.
+ """
+
+ name: str
+ """FQDN to associate with a Global load balancer."""
diff --git a/src/gradient/types/gpu_droplets/firewall.py b/src/gradient/types/gpu_droplets/firewall.py
new file mode 100644
index 00000000..0eb352a1
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall.py
@@ -0,0 +1,98 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.firewall_rule_target import FirewallRuleTarget
+
+__all__ = ["Firewall", "InboundRule", "OutboundRule", "PendingChange"]
+
+
+class InboundRule(BaseModel):
+ ports: str
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Literal["tcp", "udp", "icmp"]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+ sources: FirewallRuleTarget
+ """An object specifying locations from which inbound traffic will be accepted."""
+
+
+class OutboundRule(BaseModel):
+ destinations: FirewallRuleTarget
+    """An object specifying locations to which outbound traffic will be allowed."""
+
+ ports: str
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Literal["tcp", "udp", "icmp"]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+
+class PendingChange(BaseModel):
+ droplet_id: Optional[int] = None
+
+ removing: Optional[bool] = None
+
+ status: Optional[str] = None
+
+
+class Firewall(BaseModel):
+ id: Optional[str] = None
+ """A unique ID that can be used to identify and reference a firewall."""
+
+ created_at: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the firewall was created.
+ """
+
+ droplet_ids: Optional[List[int]] = None
+ """An array containing the IDs of the Droplets assigned to the firewall.
+
+ Requires `droplet:read` scope.
+ """
+
+ inbound_rules: Optional[List[InboundRule]] = None
+
+ name: Optional[str] = None
+ """A human-readable name for a firewall.
+
+ The name must begin with an alphanumeric character. Subsequent characters must
+ either be alphanumeric characters, a period (.), or a dash (-).
+ """
+
+ outbound_rules: Optional[List[OutboundRule]] = None
+
+ pending_changes: Optional[List[PendingChange]] = None
+ """
+ An array of objects each containing the fields "droplet_id", "removing", and
+ "status". It is provided to detail exactly which Droplets are having their
+ security policies updated. When empty, all changes have been successfully
+ applied.
+ """
+
+ status: Optional[Literal["waiting", "succeeded", "failed"]] = None
+ """A status string indicating the current state of the firewall.
+
+ This can be "waiting", "succeeded", or "failed".
+ """
+
+ tags: Optional[List[str]] = None
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+ """
diff --git a/src/gradient/types/gpu_droplets/firewall_create_params.py b/src/gradient/types/gpu_droplets/firewall_create_params.py
new file mode 100644
index 00000000..b10ae98e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_create_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .firewall_param import FirewallParam
+
+__all__ = ["FirewallCreateParams", "Body"]
+
+
+class FirewallCreateParams(TypedDict, total=False):
+ body: Body
+
+
+class Body(FirewallParam, total=False):
+ pass
diff --git a/src/gradient/types/gpu_droplets/firewall_create_response.py b/src/gradient/types/gpu_droplets/firewall_create_response.py
new file mode 100644
index 00000000..be30113a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .firewall import Firewall
+from ..._models import BaseModel
+
+__all__ = ["FirewallCreateResponse"]
+
+
+class FirewallCreateResponse(BaseModel):
+ firewall: Optional[Firewall] = None
diff --git a/src/gradient/types/gpu_droplets/firewall_list_params.py b/src/gradient/types/gpu_droplets/firewall_list_params.py
new file mode 100644
index 00000000..155cc480
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["FirewallListParams"]
+
+
+class FirewallListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/firewall_list_response.py b/src/gradient/types/gpu_droplets/firewall_list_response.py
new file mode 100644
index 00000000..ec0af688
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .firewall import Firewall
+from ..._models import BaseModel
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["FirewallListResponse"]
+
+
+class FirewallListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ firewalls: Optional[List[Firewall]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/firewall_param.py b/src/gradient/types/gpu_droplets/firewall_param.py
new file mode 100644
index 00000000..8b5a5a15
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_param.py
@@ -0,0 +1,68 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import SequenceNotStr
+from ..shared_params.firewall_rule_target import FirewallRuleTarget
+
+__all__ = ["FirewallParam", "InboundRule", "OutboundRule"]
+
+
+class InboundRule(TypedDict, total=False):
+ ports: Required[str]
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Required[Literal["tcp", "udp", "icmp"]]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+ sources: Required[FirewallRuleTarget]
+ """An object specifying locations from which inbound traffic will be accepted."""
+
+
+class OutboundRule(TypedDict, total=False):
+ destinations: Required[FirewallRuleTarget]
+    """An object specifying locations to which outbound traffic will be allowed."""
+
+ ports: Required[str]
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Required[Literal["tcp", "udp", "icmp"]]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+
+class FirewallParam(TypedDict, total=False):
+ droplet_ids: Optional[Iterable[int]]
+ """An array containing the IDs of the Droplets assigned to the firewall.
+
+ Requires `droplet:read` scope.
+ """
+
+ inbound_rules: Optional[Iterable[InboundRule]]
+
+ name: str
+ """A human-readable name for a firewall.
+
+ The name must begin with an alphanumeric character. Subsequent characters must
+ either be alphanumeric characters, a period (.), or a dash (-).
+ """
+
+ outbound_rules: Optional[Iterable[OutboundRule]]
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+ """
diff --git a/src/gradient/types/gpu_droplets/firewall_retrieve_response.py b/src/gradient/types/gpu_droplets/firewall_retrieve_response.py
new file mode 100644
index 00000000..bb29a174
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .firewall import Firewall
+from ..._models import BaseModel
+
+__all__ = ["FirewallRetrieveResponse"]
+
+
+class FirewallRetrieveResponse(BaseModel):
+ firewall: Optional[Firewall] = None
diff --git a/src/gradient/types/gpu_droplets/firewall_update_params.py b/src/gradient/types/gpu_droplets/firewall_update_params.py
new file mode 100644
index 00000000..c2d0691d
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_update_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .firewall_param import FirewallParam
+
+__all__ = ["FirewallUpdateParams"]
+
+
+class FirewallUpdateParams(TypedDict, total=False):
+ firewall: Required[FirewallParam]
diff --git a/src/gradient/types/gpu_droplets/firewall_update_response.py b/src/gradient/types/gpu_droplets/firewall_update_response.py
new file mode 100644
index 00000000..cb8ff702
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewall_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .firewall import Firewall
+from ..._models import BaseModel
+
+__all__ = ["FirewallUpdateResponse"]
+
+
+class FirewallUpdateResponse(BaseModel):
+ firewall: Optional[Firewall] = None
diff --git a/src/gradient/types/gpu_droplets/firewalls/__init__.py b/src/gradient/types/gpu_droplets/firewalls/__init__.py
new file mode 100644
index 00000000..6ba459d9
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewalls/__init__.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .tag_add_params import TagAddParams as TagAddParams
+from .rule_add_params import RuleAddParams as RuleAddParams
+from .tag_remove_params import TagRemoveParams as TagRemoveParams
+from .droplet_add_params import DropletAddParams as DropletAddParams
+from .rule_remove_params import RuleRemoveParams as RuleRemoveParams
+from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams
diff --git a/src/gradient/types/gpu_droplets/firewalls/droplet_add_params.py b/src/gradient/types/gpu_droplets/firewalls/droplet_add_params.py
new file mode 100644
index 00000000..35a403a5
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewalls/droplet_add_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+__all__ = ["DropletAddParams"]
+
+
+class DropletAddParams(TypedDict, total=False):
+ droplet_ids: Required[Iterable[int]]
+ """An array containing the IDs of the Droplets to be assigned to the firewall."""
diff --git a/src/gradient/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/gradient/types/gpu_droplets/firewalls/droplet_remove_params.py
new file mode 100644
index 00000000..5aea18e8
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewalls/droplet_remove_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+__all__ = ["DropletRemoveParams"]
+
+
+class DropletRemoveParams(TypedDict, total=False):
+ droplet_ids: Required[Iterable[int]]
+ """An array containing the IDs of the Droplets to be removed from the firewall."""
diff --git a/src/gradient/types/gpu_droplets/firewalls/rule_add_params.py b/src/gradient/types/gpu_droplets/firewalls/rule_add_params.py
new file mode 100644
index 00000000..1f49e55a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewalls/rule_add_params.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ...shared_params.firewall_rule_target import FirewallRuleTarget
+
+__all__ = ["RuleAddParams", "InboundRule", "OutboundRule"]
+
+
+class RuleAddParams(TypedDict, total=False):
+ inbound_rules: Optional[Iterable[InboundRule]]
+
+ outbound_rules: Optional[Iterable[OutboundRule]]
+
+
+class InboundRule(TypedDict, total=False):
+ ports: Required[str]
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Required[Literal["tcp", "udp", "icmp"]]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+ sources: Required[FirewallRuleTarget]
+ """An object specifying locations from which inbound traffic will be accepted."""
+
+
+class OutboundRule(TypedDict, total=False):
+ destinations: Required[FirewallRuleTarget]
+    """An object specifying locations to which outbound traffic will be allowed."""
+
+ ports: Required[str]
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Required[Literal["tcp", "udp", "icmp"]]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
diff --git a/src/gradient/types/gpu_droplets/firewalls/rule_remove_params.py b/src/gradient/types/gpu_droplets/firewalls/rule_remove_params.py
new file mode 100644
index 00000000..b6bb05df
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewalls/rule_remove_params.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ...shared_params.firewall_rule_target import FirewallRuleTarget
+
+__all__ = ["RuleRemoveParams", "InboundRule", "OutboundRule"]
+
+
+class RuleRemoveParams(TypedDict, total=False):
+ inbound_rules: Optional[Iterable[InboundRule]]
+
+ outbound_rules: Optional[Iterable[OutboundRule]]
+
+
+class InboundRule(TypedDict, total=False):
+ ports: Required[str]
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Required[Literal["tcp", "udp", "icmp"]]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+ sources: Required[FirewallRuleTarget]
+ """An object specifying locations from which inbound traffic will be accepted."""
+
+
+class OutboundRule(TypedDict, total=False):
+ destinations: Required[FirewallRuleTarget]
+    """An object specifying locations to which outbound traffic will be allowed."""
+
+ ports: Required[str]
+ """
+ The ports on which traffic will be allowed specified as a string containing a
+ single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+ protocol. For ICMP rules this parameter will always return "0".
+ """
+
+ protocol: Required[Literal["tcp", "udp", "icmp"]]
+ """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
diff --git a/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py b/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py
new file mode 100644
index 00000000..c3b9696e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["TagAddParams"]
+
+
+class TagAddParams(TypedDict, total=False):
+ tags: Required[Optional[SequenceNotStr[str]]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+ """
diff --git a/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py b/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py
new file mode 100644
index 00000000..bdd848f3
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["TagRemoveParams"]
+
+
+class TagRemoveParams(TypedDict, total=False):
+ tags: Required[Optional[SequenceNotStr[str]]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+ """
diff --git a/src/gradient/types/gpu_droplets/floating_ip.py b/src/gradient/types/gpu_droplets/floating_ip.py
new file mode 100644
index 00000000..f592d510
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ip.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import TypeAlias
+
+from ..shared import region, droplet
+from ..._models import BaseModel
+
+__all__ = ["FloatingIP", "Droplet", "Region"]
+
+Droplet: TypeAlias = Union[droplet.Droplet, Optional[object]]
+
+
+class Region(region.Region):
+ """The region that the floating IP is reserved to.
+
+ When you query a floating IP, the entire region object will be returned.
+ """
+
+ pass
+
+
+class FloatingIP(BaseModel):
+ droplet: Optional[Droplet] = None
+ """The Droplet that the floating IP has been assigned to.
+
+ When you query a floating IP, if it is assigned to a Droplet, the entire Droplet
+ object will be returned. If it is not assigned, the value will be null.
+
+ Requires `droplet:read` scope.
+ """
+
+ ip: Optional[str] = None
+ """The public IP address of the floating IP. It also serves as its identifier."""
+
+ locked: Optional[bool] = None
+ """
+ A boolean value indicating whether or not the floating IP has pending actions
+ preventing new ones from being submitted.
+ """
+
+ project_id: Optional[str] = None
+ """The UUID of the project to which the reserved IP currently belongs.
+
+ Requires `project:read` scope.
+ """
+
+ region: Optional[Region] = None
+ """The region that the floating IP is reserved to.
+
+ When you query a floating IP, the entire region object will be returned.
+ """
diff --git a/src/gradient/types/gpu_droplets/floating_ip_create_params.py b/src/gradient/types/gpu_droplets/floating_ip_create_params.py
new file mode 100644
index 00000000..2adadc27
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ip_create_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Required, TypeAlias, TypedDict
+
+__all__ = ["FloatingIPCreateParams", "AssignToDroplet", "ReserveToRegion"]
+
+
+class AssignToDroplet(TypedDict, total=False):
+ droplet_id: Required[int]
+ """The ID of the Droplet that the floating IP will be assigned to."""
+
+
+class ReserveToRegion(TypedDict, total=False):
+ region: Required[str]
+ """The slug identifier for the region the floating IP will be reserved to."""
+
+ project_id: str
+ """The UUID of the project to which the floating IP will be assigned."""
+
+
+FloatingIPCreateParams: TypeAlias = Union[AssignToDroplet, ReserveToRegion]
diff --git a/src/gradient/types/gpu_droplets/floating_ip_create_response.py b/src/gradient/types/gpu_droplets/floating_ip_create_response.py
new file mode 100644
index 00000000..04668b84
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ip_create_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .floating_ip import FloatingIP
+from ..shared.action_link import ActionLink
+
+__all__ = ["FloatingIPCreateResponse", "Links"]
+
+
+class Links(BaseModel):
+ actions: Optional[List[ActionLink]] = None
+
+ droplets: Optional[List[ActionLink]] = None
+
+
+class FloatingIPCreateResponse(BaseModel):
+ floating_ip: Optional[FloatingIP] = None
+
+ links: Optional[Links] = None
diff --git a/src/gradient/types/gpu_droplets/floating_ip_list_params.py b/src/gradient/types/gpu_droplets/floating_ip_list_params.py
new file mode 100644
index 00000000..2e054075
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ip_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["FloatingIPListParams"]
+
+
+class FloatingIPListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/floating_ip_list_response.py b/src/gradient/types/gpu_droplets/floating_ip_list_response.py
new file mode 100644
index 00000000..734011d2
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ip_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .floating_ip import FloatingIP
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["FloatingIPListResponse"]
+
+
+class FloatingIPListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ floating_ips: Optional[List[FloatingIP]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/floating_ip_retrieve_response.py b/src/gradient/types/gpu_droplets/floating_ip_retrieve_response.py
new file mode 100644
index 00000000..b7ec77d4
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ip_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .floating_ip import FloatingIP
+
+__all__ = ["FloatingIPRetrieveResponse"]
+
+
+class FloatingIPRetrieveResponse(BaseModel):
+ floating_ip: Optional[FloatingIP] = None
diff --git a/src/gradient/types/gpu_droplets/floating_ips/__init__.py b/src/gradient/types/gpu_droplets/floating_ips/__init__.py
new file mode 100644
index 00000000..a597418e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ips/__init__.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .action_create_params import ActionCreateParams as ActionCreateParams
+from .action_list_response import ActionListResponse as ActionListResponse
+from .action_create_response import ActionCreateResponse as ActionCreateResponse
+from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse
diff --git a/src/gradient/types/gpu_droplets/floating_ips/action_create_params.py b/src/gradient/types/gpu_droplets/floating_ips/action_create_params.py
new file mode 100644
index 00000000..c84f5df7
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ips/action_create_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = ["ActionCreateParams", "FloatingIPActionUnassign", "FloatingIPActionAssign"]
+
+
+class FloatingIPActionUnassign(TypedDict, total=False):
+ type: Required[Literal["assign", "unassign"]]
+ """The type of action to initiate for the floating IP."""
+
+
+class FloatingIPActionAssign(TypedDict, total=False):
+ droplet_id: Required[int]
+ """The ID of the Droplet that the floating IP will be assigned to."""
+
+ type: Required[Literal["assign", "unassign"]]
+ """The type of action to initiate for the floating IP."""
+
+
+ActionCreateParams: TypeAlias = Union[FloatingIPActionUnassign, FloatingIPActionAssign]
diff --git a/src/gradient/types/gpu_droplets/floating_ips/action_create_response.py b/src/gradient/types/gpu_droplets/floating_ips/action_create_response.py
new file mode 100644
index 00000000..90acd8c9
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ips/action_create_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...shared import action
+from ...._models import BaseModel
+
+__all__ = ["ActionCreateResponse", "Action"]
+
+
+class Action(action.Action):
+ project_id: Optional[str] = None
+ """The UUID of the project to which the reserved IP currently belongs."""
+
+
+class ActionCreateResponse(BaseModel):
+ action: Optional[Action] = None
diff --git a/src/gradient/types/gpu_droplets/floating_ips/action_list_response.py b/src/gradient/types/gpu_droplets/floating_ips/action_list_response.py
new file mode 100644
index 00000000..2f4edac5
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ips/action_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.action import Action
+from ...shared.page_links import PageLinks
+from ...shared.meta_properties import MetaProperties
+
+__all__ = ["ActionListResponse"]
+
+
+class ActionListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ actions: Optional[List[Action]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/gradient/types/gpu_droplets/floating_ips/action_retrieve_response.py
new file mode 100644
index 00000000..d94554be
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/floating_ips/action_retrieve_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...shared import action
+from ...._models import BaseModel
+
+__all__ = ["ActionRetrieveResponse", "Action"]
+
+
+class Action(action.Action):
+ project_id: Optional[str] = None
+ """The UUID of the project to which the reserved IP currently belongs."""
+
+
+class ActionRetrieveResponse(BaseModel):
+ action: Optional[Action] = None
diff --git a/src/gradient/types/gpu_droplets/forwarding_rule.py b/src/gradient/types/gpu_droplets/forwarding_rule.py
new file mode 100644
index 00000000..a9345e05
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/forwarding_rule.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ForwardingRule"]
+
+
+class ForwardingRule(BaseModel):
+ """An object specifying a forwarding rule for a load balancer."""
+
+ entry_port: int
+ """
+ An integer representing the port on which the load balancer instance will
+ listen.
+ """
+
+ entry_protocol: Literal["http", "https", "http2", "http3", "tcp", "udp"]
+ """The protocol used for traffic to the load balancer.
+
+ The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If
+ you set the `entry_protocol` to `udp`, the `target_protocol` must be set to
+ `udp`. When using UDP, the load balancer requires that you set up a health check
+ with a port that uses TCP, HTTP, or HTTPS to work properly.
+ """
+
+ target_port: int
+ """
+ An integer representing the port on the backend Droplets to which the load
+ balancer will send traffic.
+ """
+
+ target_protocol: Literal["http", "https", "http2", "tcp", "udp"]
+ """The protocol used for traffic from the load balancer to the backend Droplets.
+
+ The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. If you set
+ the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. When
+ using UDP, the load balancer requires that you set up a health check with a port
+ that uses TCP, HTTP, or HTTPS to work properly.
+ """
+
+ certificate_id: Optional[str] = None
+ """The ID of the TLS certificate used for SSL termination if enabled."""
+
+ tls_passthrough: Optional[bool] = None
+ """
+ A boolean value indicating whether SSL encrypted traffic will be passed through
+ to the backend Droplets.
+ """
diff --git a/src/gradient/types/gpu_droplets/forwarding_rule_param.py b/src/gradient/types/gpu_droplets/forwarding_rule_param.py
new file mode 100644
index 00000000..f81dfd6b
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/forwarding_rule_param.py
@@ -0,0 +1,50 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ForwardingRuleParam"]
+
+
+class ForwardingRuleParam(TypedDict, total=False):
+ """An object specifying a forwarding rule for a load balancer."""
+
+ entry_port: Required[int]
+ """
+ An integer representing the port on which the load balancer instance will
+ listen.
+ """
+
+ entry_protocol: Required[Literal["http", "https", "http2", "http3", "tcp", "udp"]]
+ """The protocol used for traffic to the load balancer.
+
+ The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If
+ you set the `entry_protocol` to `udp`, the `target_protocol` must be set to
+ `udp`. When using UDP, the load balancer requires that you set up a health check
+ with a port that uses TCP, HTTP, or HTTPS to work properly.
+ """
+
+ target_port: Required[int]
+ """
+ An integer representing the port on the backend Droplets to which the load
+ balancer will send traffic.
+ """
+
+ target_protocol: Required[Literal["http", "https", "http2", "tcp", "udp"]]
+ """The protocol used for traffic from the load balancer to the backend Droplets.
+
+ The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. If you set
+ the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. When
+ using UDP, the load balancer requires that you set up a health check with a port
+ that uses TCP, HTTP, or HTTPS to work properly.
+ """
+
+ certificate_id: str
+ """The ID of the TLS certificate used for SSL termination if enabled."""
+
+ tls_passthrough: bool
+ """
+ A boolean value indicating whether SSL encrypted traffic will be passed through
+ to the backend Droplets.
+ """
diff --git a/src/gradient/types/gpu_droplets/glb_settings.py b/src/gradient/types/gpu_droplets/glb_settings.py
new file mode 100644
index 00000000..0af332eb
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/glb_settings.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["GlbSettings", "Cdn"]
+
+
+class Cdn(BaseModel):
+ """An object specifying CDN configurations for a Global load balancer."""
+
+ is_enabled: Optional[bool] = None
+ """A boolean flag to enable CDN caching."""
+
+
+class GlbSettings(BaseModel):
+ """An object specifying forwarding configurations for a Global load balancer."""
+
+ cdn: Optional[Cdn] = None
+ """An object specifying CDN configurations for a Global load balancer."""
+
+ failover_threshold: Optional[int] = None
+ """
+ An integer value as a percentage to indicate failure threshold to decide how the
+ regional priorities will take effect. A value of `50` would indicate that the
+ Global load balancer will choose a lower priority region to forward traffic to
+ once this failure threshold has been reached for the higher priority region.
+ """
+
+ region_priorities: Optional[Dict[str, int]] = None
+ """
+ A map of region string to an integer priority value indicating preference for
+ which regional target a Global load balancer will forward traffic to. A lower
+ value indicates a higher priority.
+ """
+
+ target_port: Optional[int] = None
+ """
+ An integer representing the port on the target backends which the load balancer
+ will forward traffic to.
+ """
+
+ target_protocol: Optional[Literal["http", "https", "http2"]] = None
+ """
+ The protocol used for forwarding traffic from the load balancer to the target
+ backends. The possible values are `http`, `https` and `http2`.
+ """
diff --git a/src/gradient/types/gpu_droplets/glb_settings_param.py b/src/gradient/types/gpu_droplets/glb_settings_param.py
new file mode 100644
index 00000000..a790d0ee
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/glb_settings_param.py
@@ -0,0 +1,49 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["GlbSettingsParam", "Cdn"]
+
+
+class Cdn(TypedDict, total=False):
+ """An object specifying CDN configurations for a Global load balancer."""
+
+ is_enabled: bool
+ """A boolean flag to enable CDN caching."""
+
+
+class GlbSettingsParam(TypedDict, total=False):
+ """An object specifying forwarding configurations for a Global load balancer."""
+
+ cdn: Cdn
+ """An object specifying CDN configurations for a Global load balancer."""
+
+ failover_threshold: int
+ """
+ An integer value as a percentage to indicate failure threshold to decide how the
+ regional priorities will take effect. A value of `50` would indicate that the
+ Global load balancer will choose a lower priority region to forward traffic to
+ once this failure threshold has been reached for the higher priority region.
+ """
+
+ region_priorities: Dict[str, int]
+ """
+ A map of region string to an integer priority value indicating preference for
+ which regional target a Global load balancer will forward traffic to. A lower
+ value indicates a higher priority.
+ """
+
+ target_port: int
+ """
+ An integer representing the port on the target backends which the load balancer
+ will forward traffic to.
+ """
+
+ target_protocol: Literal["http", "https", "http2"]
+ """
+ The protocol used for forwarding traffic from the load balancer to the target
+ backends. The possible values are `http`, `https` and `http2`.
+ """
diff --git a/src/gradient/types/gpu_droplets/health_check.py b/src/gradient/types/gpu_droplets/health_check.py
new file mode 100644
index 00000000..e20cbc65
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/health_check.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["HealthCheck"]
+
+
+class HealthCheck(BaseModel):
+ """An object specifying health check settings for the load balancer."""
+
+ check_interval_seconds: Optional[int] = None
+    """The number of seconds between two consecutive health checks."""
+
+ healthy_threshold: Optional[int] = None
+ """
+ The number of times a health check must pass for a backend Droplet to be marked
+ "healthy" and be re-added to the pool.
+ """
+
+ path: Optional[str] = None
+ """
+ The path on the backend Droplets to which the load balancer instance will send a
+ request.
+ """
+
+ port: Optional[int] = None
+ """
+ An integer representing the port on the backend Droplets on which the health
+ check will attempt a connection.
+ """
+
+ protocol: Optional[Literal["http", "https", "tcp"]] = None
+ """The protocol used for health checks sent to the backend Droplets.
+
+ The possible values are `http`, `https`, or `tcp`.
+ """
+
+ response_timeout_seconds: Optional[int] = None
+ """
+ The number of seconds the load balancer instance will wait for a response until
+ marking a health check as failed.
+ """
+
+ unhealthy_threshold: Optional[int] = None
+ """
+ The number of times a health check must fail for a backend Droplet to be marked
+ "unhealthy" and be removed from the pool.
+ """
diff --git a/src/gradient/types/gpu_droplets/health_check_param.py b/src/gradient/types/gpu_droplets/health_check_param.py
new file mode 100644
index 00000000..47de9e45
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/health_check_param.py
@@ -0,0 +1,50 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["HealthCheckParam"]
+
+
+class HealthCheckParam(TypedDict, total=False):
+ """An object specifying health check settings for the load balancer."""
+
+ check_interval_seconds: int
+    """The number of seconds between two consecutive health checks."""
+
+ healthy_threshold: int
+ """
+ The number of times a health check must pass for a backend Droplet to be marked
+ "healthy" and be re-added to the pool.
+ """
+
+ path: str
+ """
+ The path on the backend Droplets to which the load balancer instance will send a
+ request.
+ """
+
+ port: int
+ """
+ An integer representing the port on the backend Droplets on which the health
+ check will attempt a connection.
+ """
+
+ protocol: Literal["http", "https", "tcp"]
+ """The protocol used for health checks sent to the backend Droplets.
+
+ The possible values are `http`, `https`, or `tcp`.
+ """
+
+ response_timeout_seconds: int
+ """
+ The number of seconds the load balancer instance will wait for a response until
+ marking a health check as failed.
+ """
+
+ unhealthy_threshold: int
+ """
+ The number of times a health check must fail for a backend Droplet to be marked
+ "unhealthy" and be removed from the pool.
+ """
diff --git a/src/gradient/types/gpu_droplets/image_create_params.py b/src/gradient/types/gpu_droplets/image_create_params.py
new file mode 100644
index 00000000..baae3bf5
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/image_create_params.py
@@ -0,0 +1,83 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["ImageCreateParams"]
+
+
+class ImageCreateParams(TypedDict, total=False):
+ description: str
+ """An optional free-form text field to describe an image."""
+
+ distribution: Literal[
+ "Arch Linux",
+ "CentOS",
+ "CoreOS",
+ "Debian",
+ "Fedora",
+ "Fedora Atomic",
+ "FreeBSD",
+ "Gentoo",
+ "openSUSE",
+ "RancherOS",
+ "Rocky Linux",
+ "Ubuntu",
+ "Unknown",
+ ]
+ """The name of a custom image's distribution.
+
+ Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`,
+ `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`,
+ `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but
+ ignored, and `Unknown` will be used in its place.
+ """
+
+ name: str
+ """The display name that has been given to an image.
+
+ This is what is shown in the control panel and is generally a descriptive title
+ for the image in question.
+ """
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+ """
+
+ url: str
+ """A URL from which the custom Linux virtual machine image may be retrieved.
+
+ The image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It
+ may be compressed using gzip or bzip2 and must be smaller than 100 GB after
+ being decompressed.
+ """
diff --git a/src/gradient/types/gpu_droplets/image_create_response.py b/src/gradient/types/gpu_droplets/image_create_response.py
new file mode 100644
index 00000000..87ebbb01
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/image_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..shared.image import Image
+
+__all__ = ["ImageCreateResponse"]
+
+
+class ImageCreateResponse(BaseModel):
+ image: Optional[Image] = None
diff --git a/src/gradient/types/gpu_droplets/image_list_params.py b/src/gradient/types/gpu_droplets/image_list_params.py
new file mode 100644
index 00000000..d8e90efa
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/image_list_params.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ImageListParams"]
+
+
+class ImageListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+    """Number of items returned per page."""
+
+ private: bool
+ """Used to filter only user images."""
+
+ tag_name: str
+ """Used to filter images by a specific tag."""
+
+ type: Literal["application", "distribution"]
+ """
+ Filters results based on image type which can be either `application` or
+ `distribution`.
+ """
diff --git a/src/gradient/types/gpu_droplets/image_list_response.py b/src/gradient/types/gpu_droplets/image_list_response.py
new file mode 100644
index 00000000..d4bb5697
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/image_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.image import Image
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["ImageListResponse"]
+
+
+class ImageListResponse(BaseModel):
+ images: List[Image]
+
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/image_retrieve_response.py b/src/gradient/types/gpu_droplets/image_retrieve_response.py
new file mode 100644
index 00000000..394dd384
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/image_retrieve_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+from ..shared.image import Image
+
+__all__ = ["ImageRetrieveResponse"]
+
+
+class ImageRetrieveResponse(BaseModel):
+ image: Image
diff --git a/src/gradient/types/gpu_droplets/image_update_params.py b/src/gradient/types/gpu_droplets/image_update_params.py
new file mode 100644
index 00000000..2ff851f8
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/image_update_params.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ImageUpdateParams"]
+
+
+class ImageUpdateParams(TypedDict, total=False):
+ description: str
+ """An optional free-form text field to describe an image."""
+
+ distribution: Literal[
+ "Arch Linux",
+ "CentOS",
+ "CoreOS",
+ "Debian",
+ "Fedora",
+ "Fedora Atomic",
+ "FreeBSD",
+ "Gentoo",
+ "openSUSE",
+ "RancherOS",
+ "Rocky Linux",
+ "Ubuntu",
+ "Unknown",
+ ]
+ """The name of a custom image's distribution.
+
+ Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`,
+ `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`,
+ `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but
+ ignored, and `Unknown` will be used in its place.
+ """
+
+ name: str
+ """The display name that has been given to an image.
+
+ This is what is shown in the control panel and is generally a descriptive title
+ for the image in question.
+ """
diff --git a/src/gradient/types/gpu_droplets/image_update_response.py b/src/gradient/types/gpu_droplets/image_update_response.py
new file mode 100644
index 00000000..3d07f5ac
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/image_update_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+from ..shared.image import Image
+
+__all__ = ["ImageUpdateResponse"]
+
+
+class ImageUpdateResponse(BaseModel):
+ image: Image
diff --git a/src/gradient/types/gpu_droplets/images/__init__.py b/src/gradient/types/gpu_droplets/images/__init__.py
new file mode 100644
index 00000000..7e78954c
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/images/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .action_create_params import ActionCreateParams as ActionCreateParams
+from .action_list_response import ActionListResponse as ActionListResponse
diff --git a/src/gradient/types/gpu_droplets/images/action_create_params.py b/src/gradient/types/gpu_droplets/images/action_create_params.py
new file mode 100644
index 00000000..a1b57d47
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/images/action_create_params.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = ["ActionCreateParams", "ImageActionBase", "ImageActionTransfer"]
+
+
+class ImageActionBase(TypedDict, total=False):
+ type: Required[Literal["convert", "transfer"]]
+ """The action to be taken on the image. Can be either `convert` or `transfer`."""
+
+
+class ImageActionTransfer(TypedDict, total=False):
+ region: Required[
+ Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ type: Required[Literal["convert", "transfer"]]
+ """The action to be taken on the image. Can be either `convert` or `transfer`."""
+
+
+ActionCreateParams: TypeAlias = Union[ImageActionBase, ImageActionTransfer]
diff --git a/src/gradient/types/gpu_droplets/images/action_list_response.py b/src/gradient/types/gpu_droplets/images/action_list_response.py
new file mode 100644
index 00000000..2f4edac5
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/images/action_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.action import Action
+from ...shared.page_links import PageLinks
+from ...shared.meta_properties import MetaProperties
+
+__all__ = ["ActionListResponse"]
+
+
+class ActionListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ actions: Optional[List[Action]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/lb_firewall.py b/src/gradient/types/gpu_droplets/lb_firewall.py
new file mode 100644
index 00000000..d233c642
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/lb_firewall.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["LbFirewall"]
+
+
+class LbFirewall(BaseModel):
+ """
+ An object specifying allow and deny rules to control traffic to the load balancer.
+ """
+
+ allow: Optional[List[str]] = None
+ """
+ the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or
+ 'cidr:1.2.0.0/16')
+ """
+
+ deny: Optional[List[str]] = None
+ """
+ the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or
+ 'cidr:1.2.0.0/16')
+ """
diff --git a/src/gradient/types/gpu_droplets/lb_firewall_param.py b/src/gradient/types/gpu_droplets/lb_firewall_param.py
new file mode 100644
index 00000000..b15cb32c
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/lb_firewall_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["LbFirewallParam"]
+
+
+class LbFirewallParam(TypedDict, total=False):
+ """
+ An object specifying allow and deny rules to control traffic to the load balancer.
+ """
+
+ allow: SequenceNotStr[str]
+ """
+ the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or
+ 'cidr:1.2.0.0/16')
+ """
+
+ deny: SequenceNotStr[str]
+ """
+ the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or
+ 'cidr:1.2.0.0/16')
+ """
diff --git a/src/gradient/types/gpu_droplets/load_balancer.py b/src/gradient/types/gpu_droplets/load_balancer.py
new file mode 100644
index 00000000..d0e7597a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer.py
@@ -0,0 +1,185 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .domains import Domains
+from ..._models import BaseModel
+from .lb_firewall import LbFirewall
+from .glb_settings import GlbSettings
+from .health_check import HealthCheck
+from ..shared.region import Region
+from .forwarding_rule import ForwardingRule
+from .sticky_sessions import StickySessions
+
+__all__ = ["LoadBalancer"]
+
+
+class LoadBalancer(BaseModel):
+ forwarding_rules: List[ForwardingRule]
+ """An array of objects specifying the forwarding rules for a load balancer."""
+
+ id: Optional[str] = None
+ """A unique ID that can be used to identify and reference a load balancer."""
+
+ algorithm: Optional[Literal["round_robin", "least_connections"]] = None
+ """This field has been deprecated.
+
+ You can no longer specify an algorithm for load balancers.
+ """
+
+ created_at: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the load balancer was created.
+ """
+
+ disable_lets_encrypt_dns_records: Optional[bool] = None
+ """
+ A boolean value indicating whether to disable automatic DNS record creation for
+ Let's Encrypt certificates that are added to the load balancer.
+ """
+
+ domains: Optional[List[Domains]] = None
+ """
+ An array of objects specifying the domain configurations for a Global load
+ balancer.
+ """
+
+ droplet_ids: Optional[List[int]] = None
+ """An array containing the IDs of the Droplets assigned to the load balancer."""
+
+ enable_backend_keepalive: Optional[bool] = None
+ """
+ A boolean value indicating whether HTTP keepalive connections are maintained to
+ target Droplets.
+ """
+
+ enable_proxy_protocol: Optional[bool] = None
+ """A boolean value indicating whether PROXY Protocol is in use."""
+
+ firewall: Optional[LbFirewall] = None
+ """
+ An object specifying allow and deny rules to control traffic to the load
+ balancer.
+ """
+
+ glb_settings: Optional[GlbSettings] = None
+ """An object specifying forwarding configurations for a Global load balancer."""
+
+ health_check: Optional[HealthCheck] = None
+ """An object specifying health check settings for the load balancer."""
+
+ http_idle_timeout_seconds: Optional[int] = None
+ """
+ An integer value which configures the idle timeout for HTTP requests to the
+ target droplets.
+ """
+
+ ip: Optional[str] = None
+ """An attribute containing the public-facing IP address of the load balancer."""
+
+ ipv6: Optional[str] = None
+ """An attribute containing the public-facing IPv6 address of the load balancer."""
+
+ name: Optional[str] = None
+ """A human-readable name for a load balancer instance."""
+
+ network: Optional[Literal["EXTERNAL", "INTERNAL"]] = None
+ """A string indicating whether the load balancer should be external or internal.
+
+ Internal load balancers have no public IPs and are only accessible to resources
+ on the same VPC network. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ network_stack: Optional[Literal["IPV4", "DUALSTACK"]] = None
+ """
+ A string indicating whether the load balancer will support IPv4 or both IPv4 and
+ IPv6 networking. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ project_id: Optional[str] = None
+ """The ID of the project that the load balancer is associated with.
+
+ If no ID is provided at creation, the load balancer associates with the user's
+ default project. If an invalid project ID is provided, the load balancer will
+ not be created.
+ """
+
+ redirect_http_to_https: Optional[bool] = None
+ """
+ A boolean value indicating whether HTTP requests to the load balancer on port 80
+ will be redirected to HTTPS on port 443.
+ """
+
+ region: Optional[Region] = None
+ """The region where the load balancer instance is located.
+
+ When setting a region, the value should be the slug identifier for the region.
+ When you query a load balancer, an entire region object will be returned.
+ """
+
+ size: Optional[Literal["lb-small", "lb-medium", "lb-large"]] = None
+ """
+ This field has been replaced by the `size_unit` field for all regions except in
+ AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+ balancer having a set number of nodes.
+
+ - `lb-small` = 1 node
+ - `lb-medium` = 3 nodes
+ - `lb-large` = 6 nodes
+
+ You can resize load balancers after creation up to once per hour. You cannot
+ resize a load balancer within the first hour of its creation.
+ """
+
+ size_unit: Optional[int] = None
+ """How many nodes the load balancer contains.
+
+ Each additional node increases the load balancer's ability to manage more
+ connections. Load balancers can be scaled up or down, and you can change the
+ number of nodes after creation up to once per hour. This field is currently not
+ available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load
+ balancers that reside in these regions.
+ """
+
+ status: Optional[Literal["new", "active", "errored"]] = None
+ """A status string indicating the current state of the load balancer.
+
+ This can be `new`, `active`, or `errored`.
+ """
+
+ sticky_sessions: Optional[StickySessions] = None
+ """An object specifying sticky sessions settings for the load balancer."""
+
+ tag: Optional[str] = None
+ """
+ The name of a Droplet tag corresponding to Droplets assigned to the load
+ balancer.
+ """
+
+ target_load_balancer_ids: Optional[List[str]] = None
+ """
+ An array containing the UUIDs of the Regional load balancers to be used as
+ target backends for a Global load balancer.
+ """
+
+ tls_cipher_policy: Optional[Literal["DEFAULT", "STRONG"]] = None
+ """
+ A string indicating the policy for the TLS cipher suites used by the load
+ balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+ `DEFAULT`.
+ """
+
+ type: Optional[Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]] = None
+ """
+ A string indicating whether the load balancer should be a standard regional HTTP
+ load balancer, a regional network load balancer that routes traffic at the
+ TCP/UDP transport layer, or a global load balancer.
+ """
+
+ vpc_uuid: Optional[str] = None
+ """A string specifying the UUID of the VPC to which the load balancer is assigned."""
diff --git a/src/gradient/types/gpu_droplets/load_balancer_create_params.py b/src/gradient/types/gpu_droplets/load_balancer_create_params.py
new file mode 100644
index 00000000..06472c78
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer_create_params.py
@@ -0,0 +1,336 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+from .domains_param import DomainsParam
+from .lb_firewall_param import LbFirewallParam
+from .glb_settings_param import GlbSettingsParam
+from .health_check_param import HealthCheckParam
+from .forwarding_rule_param import ForwardingRuleParam
+from .sticky_sessions_param import StickySessionsParam
+
+__all__ = ["LoadBalancerCreateParams", "AssignDropletsByID", "AssignDropletsByTag"]
+
+
+class AssignDropletsByID(TypedDict, total=False):
+ forwarding_rules: Required[Iterable[ForwardingRuleParam]]
+ """An array of objects specifying the forwarding rules for a load balancer."""
+
+ algorithm: Literal["round_robin", "least_connections"]
+ """This field has been deprecated.
+
+ You can no longer specify an algorithm for load balancers.
+ """
+
+ disable_lets_encrypt_dns_records: bool
+ """
+ A boolean value indicating whether to disable automatic DNS record creation for
+ Let's Encrypt certificates that are added to the load balancer.
+ """
+
+ domains: Iterable[DomainsParam]
+ """
+ An array of objects specifying the domain configurations for a Global load
+ balancer.
+ """
+
+ droplet_ids: Iterable[int]
+ """An array containing the IDs of the Droplets assigned to the load balancer."""
+
+ enable_backend_keepalive: bool
+ """
+ A boolean value indicating whether HTTP keepalive connections are maintained to
+ target Droplets.
+ """
+
+ enable_proxy_protocol: bool
+ """A boolean value indicating whether PROXY Protocol is in use."""
+
+ firewall: LbFirewallParam
+ """
+ An object specifying allow and deny rules to control traffic to the load
+ balancer.
+ """
+
+ glb_settings: GlbSettingsParam
+ """An object specifying forwarding configurations for a Global load balancer."""
+
+ health_check: HealthCheckParam
+ """An object specifying health check settings for the load balancer."""
+
+ http_idle_timeout_seconds: int
+ """
+ An integer value which configures the idle timeout for HTTP requests to the
+ target droplets.
+ """
+
+ name: str
+ """A human-readable name for a load balancer instance."""
+
+ network: Literal["EXTERNAL", "INTERNAL"]
+ """A string indicating whether the load balancer should be external or internal.
+
+ Internal load balancers have no public IPs and are only accessible to resources
+ on the same VPC network. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ network_stack: Literal["IPV4", "DUALSTACK"]
+ """
+ A string indicating whether the load balancer will support IPv4 or both IPv4 and
+ IPv6 networking. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ project_id: str
+ """The ID of the project that the load balancer is associated with.
+
+ If no ID is provided at creation, the load balancer associates with the user's
+ default project. If an invalid project ID is provided, the load balancer will
+ not be created.
+ """
+
+ redirect_http_to_https: bool
+ """
+ A boolean value indicating whether HTTP requests to the load balancer on port 80
+ will be redirected to HTTPS on port 443.
+ """
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ size: Literal["lb-small", "lb-medium", "lb-large"]
+ """
+ This field has been replaced by the `size_unit` field for all regions except in
+ AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+ balancer having a set number of nodes.
+
+ - `lb-small` = 1 node
+ - `lb-medium` = 3 nodes
+ - `lb-large` = 6 nodes
+
+ You can resize load balancers after creation up to once per hour. You cannot
+ resize a load balancer within the first hour of its creation.
+ """
+
+ size_unit: int
+ """How many nodes the load balancer contains.
+
+ Each additional node increases the load balancer's ability to manage more
+ connections. Load balancers can be scaled up or down, and you can change the
+ number of nodes after creation up to once per hour. This field is currently not
+ available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load
+ balancers that reside in these regions.
+ """
+
+ sticky_sessions: StickySessionsParam
+ """An object specifying sticky sessions settings for the load balancer."""
+
+ target_load_balancer_ids: SequenceNotStr[str]
+ """
+ An array containing the UUIDs of the Regional load balancers to be used as
+ target backends for a Global load balancer.
+ """
+
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"]
+ """
+ A string indicating the policy for the TLS cipher suites used by the load
+ balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+ `DEFAULT`.
+ """
+
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]
+ """
+ A string indicating whether the load balancer should be a standard regional HTTP
+ load balancer, a regional network load balancer that routes traffic at the
+ TCP/UDP transport layer, or a global load balancer.
+ """
+
+ vpc_uuid: str
+ """A string specifying the UUID of the VPC to which the load balancer is assigned."""
+
+
+class AssignDropletsByTag(TypedDict, total=False):
+ forwarding_rules: Required[Iterable[ForwardingRuleParam]]
+ """An array of objects specifying the forwarding rules for a load balancer."""
+
+ algorithm: Literal["round_robin", "least_connections"]
+ """This field has been deprecated.
+
+ You can no longer specify an algorithm for load balancers.
+ """
+
+ disable_lets_encrypt_dns_records: bool
+ """
+ A boolean value indicating whether to disable automatic DNS record creation for
+ Let's Encrypt certificates that are added to the load balancer.
+ """
+
+ domains: Iterable[DomainsParam]
+ """
+ An array of objects specifying the domain configurations for a Global load
+ balancer.
+ """
+
+ enable_backend_keepalive: bool
+ """
+ A boolean value indicating whether HTTP keepalive connections are maintained to
+ target Droplets.
+ """
+
+ enable_proxy_protocol: bool
+ """A boolean value indicating whether PROXY Protocol is in use."""
+
+ firewall: LbFirewallParam
+ """
+ An object specifying allow and deny rules to control traffic to the load
+ balancer.
+ """
+
+ glb_settings: GlbSettingsParam
+ """An object specifying forwarding configurations for a Global load balancer."""
+
+ health_check: HealthCheckParam
+ """An object specifying health check settings for the load balancer."""
+
+ http_idle_timeout_seconds: int
+ """
+ An integer value which configures the idle timeout for HTTP requests to the
+ target droplets.
+ """
+
+ name: str
+ """A human-readable name for a load balancer instance."""
+
+ network: Literal["EXTERNAL", "INTERNAL"]
+ """A string indicating whether the load balancer should be external or internal.
+
+ Internal load balancers have no public IPs and are only accessible to resources
+ on the same VPC network. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ network_stack: Literal["IPV4", "DUALSTACK"]
+ """
+ A string indicating whether the load balancer will support IPv4 or both IPv4 and
+ IPv6 networking. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ project_id: str
+ """The ID of the project that the load balancer is associated with.
+
+ If no ID is provided at creation, the load balancer associates with the user's
+ default project. If an invalid project ID is provided, the load balancer will
+ not be created.
+ """
+
+ redirect_http_to_https: bool
+ """
+ A boolean value indicating whether HTTP requests to the load balancer on port 80
+ will be redirected to HTTPS on port 443.
+ """
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ size: Literal["lb-small", "lb-medium", "lb-large"]
+ """
+ This field has been replaced by the `size_unit` field for all regions except in
+ AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+ balancer having a set number of nodes.
+
+ - `lb-small` = 1 node
+ - `lb-medium` = 3 nodes
+ - `lb-large` = 6 nodes
+
+ You can resize load balancers after creation up to once per hour. You cannot
+ resize a load balancer within the first hour of its creation.
+ """
+
+ size_unit: int
+ """How many nodes the load balancer contains.
+
+ Each additional node increases the load balancer's ability to manage more
+ connections. Load balancers can be scaled up or down, and you can change the
+ number of nodes after creation up to once per hour. This field is currently not
+ available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load
+ balancers that reside in these regions.
+ """
+
+ sticky_sessions: StickySessionsParam
+ """An object specifying sticky sessions settings for the load balancer."""
+
+ tag: str
+ """
+ The name of a Droplet tag corresponding to Droplets assigned to the load
+ balancer.
+ """
+
+ target_load_balancer_ids: SequenceNotStr[str]
+ """
+ An array containing the UUIDs of the Regional load balancers to be used as
+ target backends for a Global load balancer.
+ """
+
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"]
+ """
+ A string indicating the policy for the TLS cipher suites used by the load
+ balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+ `DEFAULT`.
+ """
+
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]
+ """
+ A string indicating whether the load balancer should be a standard regional HTTP
+ load balancer, a regional network load balancer that routes traffic at the
+ TCP/UDP transport layer, or a global load balancer.
+ """
+
+ vpc_uuid: str
+ """A string specifying the UUID of the VPC to which the load balancer is assigned."""
+
+
+LoadBalancerCreateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag]
diff --git a/src/gradient/types/gpu_droplets/load_balancer_create_response.py b/src/gradient/types/gpu_droplets/load_balancer_create_response.py
new file mode 100644
index 00000000..ed4f2211
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .load_balancer import LoadBalancer
+
+__all__ = ["LoadBalancerCreateResponse"]
+
+
+class LoadBalancerCreateResponse(BaseModel):
+ load_balancer: Optional[LoadBalancer] = None
diff --git a/src/gradient/types/gpu_droplets/load_balancer_list_params.py b/src/gradient/types/gpu_droplets/load_balancer_list_params.py
new file mode 100644
index 00000000..d0daff3f
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["LoadBalancerListParams"]
+
+
+class LoadBalancerListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/load_balancer_list_response.py b/src/gradient/types/gpu_droplets/load_balancer_list_response.py
new file mode 100644
index 00000000..d5d0b4ac
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .load_balancer import LoadBalancer
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["LoadBalancerListResponse"]
+
+
+class LoadBalancerListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ links: Optional[PageLinks] = None
+
+ load_balancers: Optional[List[LoadBalancer]] = None
diff --git a/src/gradient/types/gpu_droplets/load_balancer_retrieve_response.py b/src/gradient/types/gpu_droplets/load_balancer_retrieve_response.py
new file mode 100644
index 00000000..779e9693
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .load_balancer import LoadBalancer
+
+__all__ = ["LoadBalancerRetrieveResponse"]
+
+
+class LoadBalancerRetrieveResponse(BaseModel):
+ load_balancer: Optional[LoadBalancer] = None
diff --git a/src/gradient/types/gpu_droplets/load_balancer_update_params.py b/src/gradient/types/gpu_droplets/load_balancer_update_params.py
new file mode 100644
index 00000000..01c2bda5
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer_update_params.py
@@ -0,0 +1,336 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+from .domains_param import DomainsParam
+from .lb_firewall_param import LbFirewallParam
+from .glb_settings_param import GlbSettingsParam
+from .health_check_param import HealthCheckParam
+from .forwarding_rule_param import ForwardingRuleParam
+from .sticky_sessions_param import StickySessionsParam
+
+__all__ = ["LoadBalancerUpdateParams", "AssignDropletsByID", "AssignDropletsByTag"]
+
+
+class AssignDropletsByID(TypedDict, total=False):
+ forwarding_rules: Required[Iterable[ForwardingRuleParam]]
+ """An array of objects specifying the forwarding rules for a load balancer."""
+
+ algorithm: Literal["round_robin", "least_connections"]
+ """This field has been deprecated.
+
+ You can no longer specify an algorithm for load balancers.
+ """
+
+ disable_lets_encrypt_dns_records: bool
+ """
+ A boolean value indicating whether to disable automatic DNS record creation for
+ Let's Encrypt certificates that are added to the load balancer.
+ """
+
+ domains: Iterable[DomainsParam]
+ """
+ An array of objects specifying the domain configurations for a Global load
+ balancer.
+ """
+
+ droplet_ids: Iterable[int]
+ """An array containing the IDs of the Droplets assigned to the load balancer."""
+
+ enable_backend_keepalive: bool
+ """
+ A boolean value indicating whether HTTP keepalive connections are maintained to
+ target Droplets.
+ """
+
+ enable_proxy_protocol: bool
+ """A boolean value indicating whether PROXY Protocol is in use."""
+
+ firewall: LbFirewallParam
+ """
+ An object specifying allow and deny rules to control traffic to the load
+ balancer.
+ """
+
+ glb_settings: GlbSettingsParam
+ """An object specifying forwarding configurations for a Global load balancer."""
+
+ health_check: HealthCheckParam
+ """An object specifying health check settings for the load balancer."""
+
+ http_idle_timeout_seconds: int
+ """
+ An integer value which configures the idle timeout for HTTP requests to the
+ target droplets.
+ """
+
+ name: str
+ """A human-readable name for a load balancer instance."""
+
+ network: Literal["EXTERNAL", "INTERNAL"]
+ """A string indicating whether the load balancer should be external or internal.
+
+ Internal load balancers have no public IPs and are only accessible to resources
+ on the same VPC network. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ network_stack: Literal["IPV4", "DUALSTACK"]
+ """
+ A string indicating whether the load balancer will support IPv4 or both IPv4 and
+ IPv6 networking. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ project_id: str
+ """The ID of the project that the load balancer is associated with.
+
+ If no ID is provided at creation, the load balancer associates with the user's
+ default project. If an invalid project ID is provided, the load balancer will
+ not be created.
+ """
+
+ redirect_http_to_https: bool
+ """
+ A boolean value indicating whether HTTP requests to the load balancer on port 80
+ will be redirected to HTTPS on port 443.
+ """
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ size: Literal["lb-small", "lb-medium", "lb-large"]
+ """
+ This field has been replaced by the `size_unit` field for all regions except in
+ AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+ balancer having a set number of nodes.
+
+ - `lb-small` = 1 node
+ - `lb-medium` = 3 nodes
+ - `lb-large` = 6 nodes
+
+ You can resize load balancers after creation up to once per hour. You cannot
+ resize a load balancer within the first hour of its creation.
+ """
+
+ size_unit: int
+ """How many nodes the load balancer contains.
+
+ Each additional node increases the load balancer's ability to manage more
+ connections. Load balancers can be scaled up or down, and you can change the
+ number of nodes after creation up to once per hour. This field is currently not
+ available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load
+ balancers that reside in these regions.
+ """
+
+ sticky_sessions: StickySessionsParam
+ """An object specifying sticky sessions settings for the load balancer."""
+
+ target_load_balancer_ids: SequenceNotStr[str]
+ """
+ An array containing the UUIDs of the Regional load balancers to be used as
+ target backends for a Global load balancer.
+ """
+
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"]
+ """
+ A string indicating the policy for the TLS cipher suites used by the load
+ balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+ `DEFAULT`.
+ """
+
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]
+ """
+ A string indicating whether the load balancer should be a standard regional HTTP
+ load balancer, a regional network load balancer that routes traffic at the
+ TCP/UDP transport layer, or a global load balancer.
+ """
+
+ vpc_uuid: str
+ """A string specifying the UUID of the VPC to which the load balancer is assigned."""
+
+
+class AssignDropletsByTag(TypedDict, total=False):
+ forwarding_rules: Required[Iterable[ForwardingRuleParam]]
+ """An array of objects specifying the forwarding rules for a load balancer."""
+
+ algorithm: Literal["round_robin", "least_connections"]
+ """This field has been deprecated.
+
+ You can no longer specify an algorithm for load balancers.
+ """
+
+ disable_lets_encrypt_dns_records: bool
+ """
+ A boolean value indicating whether to disable automatic DNS record creation for
+ Let's Encrypt certificates that are added to the load balancer.
+ """
+
+ domains: Iterable[DomainsParam]
+ """
+ An array of objects specifying the domain configurations for a Global load
+ balancer.
+ """
+
+ enable_backend_keepalive: bool
+ """
+ A boolean value indicating whether HTTP keepalive connections are maintained to
+ target Droplets.
+ """
+
+ enable_proxy_protocol: bool
+ """A boolean value indicating whether PROXY Protocol is in use."""
+
+ firewall: LbFirewallParam
+ """
+ An object specifying allow and deny rules to control traffic to the load
+ balancer.
+ """
+
+ glb_settings: GlbSettingsParam
+ """An object specifying forwarding configurations for a Global load balancer."""
+
+ health_check: HealthCheckParam
+ """An object specifying health check settings for the load balancer."""
+
+ http_idle_timeout_seconds: int
+ """
+ An integer value which configures the idle timeout for HTTP requests to the
+ target droplets.
+ """
+
+ name: str
+ """A human-readable name for a load balancer instance."""
+
+ network: Literal["EXTERNAL", "INTERNAL"]
+ """A string indicating whether the load balancer should be external or internal.
+
+ Internal load balancers have no public IPs and are only accessible to resources
+ on the same VPC network. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ network_stack: Literal["IPV4", "DUALSTACK"]
+ """
+ A string indicating whether the load balancer will support IPv4 or both IPv4 and
+ IPv6 networking. This property cannot be updated after creating the load
+ balancer.
+ """
+
+ project_id: str
+ """The ID of the project that the load balancer is associated with.
+
+ If no ID is provided at creation, the load balancer associates with the user's
+ default project. If an invalid project ID is provided, the load balancer will
+ not be created.
+ """
+
+ redirect_http_to_https: bool
+ """
+ A boolean value indicating whether HTTP requests to the load balancer on port 80
+ will be redirected to HTTPS on port 443.
+ """
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ size: Literal["lb-small", "lb-medium", "lb-large"]
+ """
+ This field has been replaced by the `size_unit` field for all regions except in
+ AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load
+ balancer having a set number of nodes.
+
+ - `lb-small` = 1 node
+ - `lb-medium` = 3 nodes
+ - `lb-large` = 6 nodes
+
+ You can resize load balancers after creation up to once per hour. You cannot
+ resize a load balancer within the first hour of its creation.
+ """
+
+ size_unit: int
+ """How many nodes the load balancer contains.
+
+ Each additional node increases the load balancer's ability to manage more
+ connections. Load balancers can be scaled up or down, and you can change the
+ number of nodes after creation up to once per hour. This field is currently not
+ available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load
+ balancers that reside in these regions.
+ """
+
+ sticky_sessions: StickySessionsParam
+ """An object specifying sticky sessions settings for the load balancer."""
+
+ tag: str
+ """
+ The name of a Droplet tag corresponding to Droplets assigned to the load
+ balancer.
+ """
+
+ target_load_balancer_ids: SequenceNotStr[str]
+ """
+ An array containing the UUIDs of the Regional load balancers to be used as
+ target backends for a Global load balancer.
+ """
+
+ tls_cipher_policy: Literal["DEFAULT", "STRONG"]
+ """
+ A string indicating the policy for the TLS cipher suites used by the load
+ balancer. The possible values are `DEFAULT` or `STRONG`. The default value is
+ `DEFAULT`.
+ """
+
+ type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]
+ """
+ A string indicating whether the load balancer should be a standard regional HTTP
+ load balancer, a regional network load balancer that routes traffic at the
+ TCP/UDP transport layer, or a global load balancer.
+ """
+
+ vpc_uuid: str
+ """A string specifying the UUID of the VPC to which the load balancer is assigned."""
+
+
+LoadBalancerUpdateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag]
diff --git a/src/gradient/types/gpu_droplets/load_balancer_update_response.py b/src/gradient/types/gpu_droplets/load_balancer_update_response.py
new file mode 100644
index 00000000..2b24b376
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancer_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .load_balancer import LoadBalancer
+
+__all__ = ["LoadBalancerUpdateResponse"]
+
+
+class LoadBalancerUpdateResponse(BaseModel):
+ load_balancer: Optional[LoadBalancer] = None
diff --git a/src/gradient/types/gpu_droplets/load_balancers/__init__.py b/src/gradient/types/gpu_droplets/load_balancers/__init__.py
new file mode 100644
index 00000000..806a71be
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancers/__init__.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .droplet_add_params import DropletAddParams as DropletAddParams
+from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams
+from .forwarding_rule_add_params import ForwardingRuleAddParams as ForwardingRuleAddParams
+from .forwarding_rule_remove_params import ForwardingRuleRemoveParams as ForwardingRuleRemoveParams
diff --git a/src/gradient/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/gradient/types/gpu_droplets/load_balancers/droplet_add_params.py
new file mode 100644
index 00000000..ee403f5f
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancers/droplet_add_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+__all__ = ["DropletAddParams"]
+
+
+class DropletAddParams(TypedDict, total=False):
+ droplet_ids: Required[Iterable[int]]
+ """An array containing the IDs of the Droplets assigned to the load balancer."""
diff --git a/src/gradient/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/gradient/types/gpu_droplets/load_balancers/droplet_remove_params.py
new file mode 100644
index 00000000..d48795e9
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancers/droplet_remove_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+__all__ = ["DropletRemoveParams"]
+
+
+class DropletRemoveParams(TypedDict, total=False):
+ droplet_ids: Required[Iterable[int]]
+ """An array containing the IDs of the Droplets assigned to the load balancer."""
diff --git a/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
new file mode 100644
index 00000000..2cc6a2df
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+from ..forwarding_rule_param import ForwardingRuleParam
+
+__all__ = ["ForwardingRuleAddParams"]
+
+
+class ForwardingRuleAddParams(TypedDict, total=False):
+ forwarding_rules: Required[Iterable[ForwardingRuleParam]]
diff --git a/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
new file mode 100644
index 00000000..e5209543
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+from ..forwarding_rule_param import ForwardingRuleParam
+
+__all__ = ["ForwardingRuleRemoveParams"]
+
+
+class ForwardingRuleRemoveParams(TypedDict, total=False):
+ forwarding_rules: Required[Iterable[ForwardingRuleParam]]
diff --git a/src/gradient/types/gpu_droplets/size_list_params.py b/src/gradient/types/gpu_droplets/size_list_params.py
new file mode 100644
index 00000000..5df85a9c
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/size_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["SizeListParams"]
+
+
+class SizeListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/size_list_response.py b/src/gradient/types/gpu_droplets/size_list_response.py
new file mode 100644
index 00000000..c0c600b4
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/size_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.size import Size
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["SizeListResponse"]
+
+
+class SizeListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ sizes: List[Size]
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/snapshot_list_params.py b/src/gradient/types/gpu_droplets/snapshot_list_params.py
new file mode 100644
index 00000000..6d1b6f5b
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/snapshot_list_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["SnapshotListParams"]
+
+
+class SnapshotListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ resource_type: Literal["droplet", "volume"]
+ """Used to filter snapshots by a resource type."""
diff --git a/src/gradient/types/gpu_droplets/snapshot_list_response.py b/src/gradient/types/gpu_droplets/snapshot_list_response.py
new file mode 100644
index 00000000..29b6ec3b
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/snapshot_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.snapshots import Snapshots
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["SnapshotListResponse"]
+
+
+class SnapshotListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ links: Optional[PageLinks] = None
+
+ snapshots: Optional[List[Snapshots]] = None
diff --git a/src/gradient/types/gpu_droplets/snapshot_retrieve_response.py b/src/gradient/types/gpu_droplets/snapshot_retrieve_response.py
new file mode 100644
index 00000000..38d84c7a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/snapshot_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from ..shared.snapshots import Snapshots
+
+__all__ = ["SnapshotRetrieveResponse"]
+
+
+class SnapshotRetrieveResponse(BaseModel):
+ snapshot: Optional[Snapshots] = None
diff --git a/src/gradient/types/gpu_droplets/sticky_sessions.py b/src/gradient/types/gpu_droplets/sticky_sessions.py
new file mode 100644
index 00000000..1723241a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/sticky_sessions.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["StickySessions"]
+
+
+class StickySessions(BaseModel):
+ """An object specifying sticky sessions settings for the load balancer."""
+
+ cookie_name: Optional[str] = None
+ """The name of the cookie sent to the client.
+
+ This attribute is only returned when using `cookies` for the sticky sessions
+ type.
+ """
+
+ cookie_ttl_seconds: Optional[int] = None
+ """The number of seconds until the cookie set by the load balancer expires.
+
+ This attribute is only returned when using `cookies` for the sticky sessions
+ type.
+ """
+
+ type: Optional[Literal["cookies", "none"]] = None
+ """
+ An attribute indicating how and if requests from a client will be persistently
+ served by the same backend Droplet. The possible values are `cookies` or `none`.
+ """
diff --git a/src/gradient/types/gpu_droplets/sticky_sessions_param.py b/src/gradient/types/gpu_droplets/sticky_sessions_param.py
new file mode 100644
index 00000000..425873dc
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/sticky_sessions_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["StickySessionsParam"]
+
+
+class StickySessionsParam(TypedDict, total=False):
+ """An object specifying sticky sessions settings for the load balancer."""
+
+ cookie_name: str
+ """The name of the cookie sent to the client.
+
+ This attribute is only returned when using `cookies` for the sticky sessions
+ type.
+ """
+
+ cookie_ttl_seconds: int
+ """The number of seconds until the cookie set by the load balancer expires.
+
+ This attribute is only returned when using `cookies` for the sticky sessions
+ type.
+ """
+
+ type: Literal["cookies", "none"]
+ """
+ An attribute indicating how and if requests from a client will be persistently
+ served by the same backend Droplet. The possible values are `cookies` or `none`.
+ """
diff --git a/src/gradient/types/gpu_droplets/volume_create_params.py b/src/gradient/types/gpu_droplets/volume_create_params.py
new file mode 100644
index 00000000..c58f7f9d
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volume_create_params.py
@@ -0,0 +1,155 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["VolumeCreateParams", "VolumesExt4", "VolumesXfs"]
+
+
+class VolumesExt4(TypedDict, total=False):
+ name: Required[str]
+ """A human-readable name for the block storage volume.
+
+ Must be lowercase and be composed only of numbers, letters and "-", up to a
+ limit of 64 characters. The name must begin with a letter.
+ """
+
+ region: Required[
+ Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ size_gigabytes: Required[int]
+ """The size of the block storage volume in GiB (1024^3).
+
+ This field does not apply when creating a volume from a snapshot.
+ """
+
+ description: str
+ """An optional free-form text field to describe a block storage volume."""
+
+ filesystem_label: str
+ """The label applied to the filesystem.
+
+ Labels for ext4 type filesystems may contain 16 characters while labels for xfs
+ type filesystems are limited to 12 characters. May only be used in conjunction
+ with filesystem_type.
+ """
+
+ filesystem_type: str
+ """The name of the filesystem type to be used on the volume.
+
+ When provided, the volume will automatically be formatted to the specified
+ filesystem type. Currently, the available options are `ext4` and `xfs`.
+ Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian,
+ Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018.
+ Attaching pre-formatted volumes to other Droplets is not recommended.
+ """
+
+ snapshot_id: str
+ """The unique identifier for the volume snapshot from which to create the volume."""
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+ """
+
+
+class VolumesXfs(TypedDict, total=False):
+ name: Required[str]
+ """A human-readable name for the block storage volume.
+
+ Must be lowercase and be composed only of numbers, letters and "-", up to a
+ limit of 64 characters. The name must begin with a letter.
+ """
+
+ region: Required[
+ Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ size_gigabytes: Required[int]
+ """The size of the block storage volume in GiB (1024^3).
+
+ This field does not apply when creating a volume from a snapshot.
+ """
+
+ description: str
+ """An optional free-form text field to describe a block storage volume."""
+
+ filesystem_label: str
+ """The label applied to the filesystem.
+
+ Labels for ext4 type filesystems may contain 16 characters while labels for xfs
+ type filesystems are limited to 12 characters. May only be used in conjunction
+ with filesystem_type.
+ """
+
+ filesystem_type: str
+ """The name of the filesystem type to be used on the volume.
+
+ When provided, the volume will automatically be formatted to the specified
+ filesystem type. Currently, the available options are `ext4` and `xfs`.
+ Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian,
+ Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018.
+ Attaching pre-formatted volumes to other Droplets is not recommended.
+ """
+
+ snapshot_id: str
+ """The unique identifier for the volume snapshot from which to create the volume."""
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+ """
+
+
+VolumeCreateParams: TypeAlias = Union[VolumesExt4, VolumesXfs]
diff --git a/src/gradient/types/gpu_droplets/volume_create_response.py b/src/gradient/types/gpu_droplets/volume_create_response.py
new file mode 100644
index 00000000..1bca9965
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volume_create_response.py
@@ -0,0 +1,65 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.region import Region
+
+__all__ = ["VolumeCreateResponse", "Volume"]
+
+
+class Volume(BaseModel):
+ id: Optional[str] = None
+ """The unique identifier for the block storage volume."""
+
+ created_at: Optional[str] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the block storage volume was created.
+ """
+
+ description: Optional[str] = None
+ """An optional free-form text field to describe a block storage volume."""
+
+ droplet_ids: Optional[List[int]] = None
+ """An array containing the IDs of the Droplets the volume is attached to.
+
+ Note that at this time, a volume can only be attached to a single Droplet.
+ """
+
+ filesystem_label: Optional[str] = None
+ """The label currently applied to the filesystem."""
+
+ filesystem_type: Optional[str] = None
+ """The type of filesystem currently in-use on the volume."""
+
+ name: Optional[str] = None
+ """A human-readable name for the block storage volume.
+
+ Must be lowercase and be composed only of numbers, letters and "-", up to a
+ limit of 64 characters. The name must begin with a letter.
+ """
+
+ region: Optional[Region] = None
+ """The region that the block storage volume is located in.
+
+ When setting a region, the value should be the slug identifier for the region.
+ When you query a block storage volume, the entire region object will be
+ returned.
+ """
+
+ size_gigabytes: Optional[int] = None
+ """The size of the block storage volume in GiB (1024^3).
+
+ This field does not apply when creating a volume from a snapshot.
+ """
+
+ tags: Optional[List[str]] = None
+ """A flat array of tag names as strings applied to the resource.
+
+ Requires `tag:read` scope.
+ """
+
+
+class VolumeCreateResponse(BaseModel):
+ volume: Optional[Volume] = None
diff --git a/src/gradient/types/gpu_droplets/volume_delete_by_name_params.py b/src/gradient/types/gpu_droplets/volume_delete_by_name_params.py
new file mode 100644
index 00000000..26d173f0
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volume_delete_by_name_params.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["VolumeDeleteByNameParams"]
+
+
+class VolumeDeleteByNameParams(TypedDict, total=False):
+ name: str
+ """The block storage volume's name."""
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """The slug identifier for the region where the resource is available."""
diff --git a/src/gradient/types/gpu_droplets/volume_list_params.py b/src/gradient/types/gpu_droplets/volume_list_params.py
new file mode 100644
index 00000000..b4549651
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volume_list_params.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["VolumeListParams"]
+
+
+class VolumeListParams(TypedDict, total=False):
+ name: str
+ """The block storage volume's name."""
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """The slug identifier for the region where the resource is available."""
diff --git a/src/gradient/types/gpu_droplets/volume_list_response.py b/src/gradient/types/gpu_droplets/volume_list_response.py
new file mode 100644
index 00000000..69ff421a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volume_list_response.py
@@ -0,0 +1,73 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.region import Region
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["VolumeListResponse", "Volume"]
+
+
+class Volume(BaseModel):
+ id: Optional[str] = None
+ """The unique identifier for the block storage volume."""
+
+ created_at: Optional[str] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the block storage volume was created.
+ """
+
+ description: Optional[str] = None
+ """An optional free-form text field to describe a block storage volume."""
+
+ droplet_ids: Optional[List[int]] = None
+ """An array containing the IDs of the Droplets the volume is attached to.
+
+ Note that at this time, a volume can only be attached to a single Droplet.
+ """
+
+ filesystem_label: Optional[str] = None
+ """The label currently applied to the filesystem."""
+
+ filesystem_type: Optional[str] = None
+ """The type of filesystem currently in-use on the volume."""
+
+ name: Optional[str] = None
+ """A human-readable name for the block storage volume.
+
+ Must be lowercase and be composed only of numbers, letters and "-", up to a
+ limit of 64 characters. The name must begin with a letter.
+ """
+
+ region: Optional[Region] = None
+ """The region that the block storage volume is located in.
+
+ When setting a region, the value should be the slug identifier for the region.
+ When you query a block storage volume, the entire region object will be
+ returned.
+ """
+
+ size_gigabytes: Optional[int] = None
+ """The size of the block storage volume in GiB (1024^3).
+
+ This field does not apply when creating a volume from a snapshot.
+ """
+
+ tags: Optional[List[str]] = None
+ """A flat array of tag names as strings applied to the resource.
+
+ Requires `tag:read` scope.
+ """
+
+
+class VolumeListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ volumes: List[Volume]
+ """Array of volumes."""
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/volume_retrieve_response.py b/src/gradient/types/gpu_droplets/volume_retrieve_response.py
new file mode 100644
index 00000000..3efe8de7
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volume_retrieve_response.py
@@ -0,0 +1,65 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.region import Region
+
+__all__ = ["VolumeRetrieveResponse", "Volume"]
+
+
+class Volume(BaseModel):
+ id: Optional[str] = None
+ """The unique identifier for the block storage volume."""
+
+ created_at: Optional[str] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the block storage volume was created.
+ """
+
+ description: Optional[str] = None
+ """An optional free-form text field to describe a block storage volume."""
+
+ droplet_ids: Optional[List[int]] = None
+ """An array containing the IDs of the Droplets the volume is attached to.
+
+ Note that at this time, a volume can only be attached to a single Droplet.
+ """
+
+ filesystem_label: Optional[str] = None
+ """The label currently applied to the filesystem."""
+
+ filesystem_type: Optional[str] = None
+ """The type of filesystem currently in-use on the volume."""
+
+ name: Optional[str] = None
+ """A human-readable name for the block storage volume.
+
+ Must be lowercase and be composed only of numbers, letters and "-", up to a
+ limit of 64 characters. The name must begin with a letter.
+ """
+
+ region: Optional[Region] = None
+ """The region that the block storage volume is located in.
+
+ When setting a region, the value should be the slug identifier for the region.
+ When you query a block storage volume, the entire region object will be
+ returned.
+ """
+
+ size_gigabytes: Optional[int] = None
+ """The size of the block storage volume in GiB (1024^3).
+
+ This field does not apply when creating a volume from a snapshot.
+ """
+
+ tags: Optional[List[str]] = None
+ """A flat array of tag names as strings applied to the resource.
+
+ Requires `tag:read` scope.
+ """
+
+
+class VolumeRetrieveResponse(BaseModel):
+ volume: Optional[Volume] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/__init__.py b/src/gradient/types/gpu_droplets/volumes/__init__.py
new file mode 100644
index 00000000..68d3d1e9
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/__init__.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .volume_action import VolumeAction as VolumeAction
+from .action_list_params import ActionListParams as ActionListParams
+from .action_list_response import ActionListResponse as ActionListResponse
+from .snapshot_list_params import SnapshotListParams as SnapshotListParams
+from .action_retrieve_params import ActionRetrieveParams as ActionRetrieveParams
+from .snapshot_create_params import SnapshotCreateParams as SnapshotCreateParams
+from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse
+from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse
+from .snapshot_create_response import SnapshotCreateResponse as SnapshotCreateResponse
+from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse
+from .action_initiate_by_id_params import ActionInitiateByIDParams as ActionInitiateByIDParams
+from .action_initiate_by_id_response import ActionInitiateByIDResponse as ActionInitiateByIDResponse
+from .action_initiate_by_name_params import ActionInitiateByNameParams as ActionInitiateByNameParams
+from .action_initiate_by_name_response import ActionInitiateByNameResponse as ActionInitiateByNameResponse
diff --git a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py
new file mode 100644
index 00000000..bf1869af
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py
@@ -0,0 +1,135 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["ActionInitiateByIDParams", "VolumeActionPostAttach", "VolumeActionPostDetach", "VolumeActionPostResize"]
+
+
+class VolumeActionPostAttach(TypedDict, total=False):
+ droplet_id: Required[int]
+ """
+ The unique identifier for the Droplet the volume will be attached or detached
+ from.
+ """
+
+ type: Required[Literal["attach", "detach", "resize"]]
+ """The volume action to initiate."""
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+ """
+
+
+class VolumeActionPostDetach(TypedDict, total=False):
+ droplet_id: Required[int]
+ """
+ The unique identifier for the Droplet the volume will be attached or detached
+ from.
+ """
+
+ type: Required[Literal["attach", "detach", "resize"]]
+ """The volume action to initiate."""
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+
+class VolumeActionPostResize(TypedDict, total=False):
+ size_gigabytes: Required[int]
+ """The new size of the block storage volume in GiB (1024^3)."""
+
+ type: Required[Literal["attach", "detach", "resize"]]
+ """The volume action to initiate."""
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+
+ActionInitiateByIDParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach, VolumeActionPostResize]
diff --git a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_response.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_response.py
new file mode 100644
index 00000000..d8460f22
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from .volume_action import VolumeAction
+
+__all__ = ["ActionInitiateByIDResponse"]
+
+
+class ActionInitiateByIDResponse(BaseModel):
+ action: Optional[VolumeAction] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py
new file mode 100644
index 00000000..f37d6d9a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py
@@ -0,0 +1,99 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["ActionInitiateByNameParams", "VolumeActionPostAttach", "VolumeActionPostDetach"]
+
+
+class VolumeActionPostAttach(TypedDict, total=False):
+ droplet_id: Required[int]
+ """
+ The unique identifier for the Droplet the volume will be attached or detached
+ from.
+ """
+
+ type: Required[Literal["attach", "detach", "resize"]]
+ """The volume action to initiate."""
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+ """
+
+
+class VolumeActionPostDetach(TypedDict, total=False):
+ droplet_id: Required[int]
+ """
+ The unique identifier for the Droplet the volume will be attached or detached
+ from.
+ """
+
+ type: Required[Literal["attach", "detach", "resize"]]
+ """The volume action to initiate."""
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
+
+ region: Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ """
+ The slug identifier for the region where the resource will initially be
+ available.
+ """
+
+
+ActionInitiateByNameParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach]
diff --git a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_response.py
new file mode 100644
index 00000000..9a935bdf
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from .volume_action import VolumeAction
+
+__all__ = ["ActionInitiateByNameResponse"]
+
+
+class ActionInitiateByNameResponse(BaseModel):
+ action: Optional[VolumeAction] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/action_list_params.py b/src/gradient/types/gpu_droplets/volumes/action_list_params.py
new file mode 100644
index 00000000..dd873288
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ActionListParams"]
+
+
+class ActionListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/volumes/action_list_response.py b/src/gradient/types/gpu_droplets/volumes/action_list_response.py
new file mode 100644
index 00000000..35964633
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from .volume_action import VolumeAction
+from ...shared.page_links import PageLinks
+from ...shared.meta_properties import MetaProperties
+
+__all__ = ["ActionListResponse"]
+
+
+class ActionListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ actions: Optional[List[VolumeAction]] = None
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/action_retrieve_params.py b/src/gradient/types/gpu_droplets/volumes/action_retrieve_params.py
new file mode 100644
index 00000000..93ab443f
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_retrieve_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ActionRetrieveParams"]
+
+
+class ActionRetrieveParams(TypedDict, total=False):
+ volume_id: Required[str]
+
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/volumes/action_retrieve_response.py b/src/gradient/types/gpu_droplets/volumes/action_retrieve_response.py
new file mode 100644
index 00000000..cd47f37e
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/action_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from .volume_action import VolumeAction
+
+__all__ = ["ActionRetrieveResponse"]
+
+
+class ActionRetrieveResponse(BaseModel):
+ action: Optional[VolumeAction] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py b/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py
new file mode 100644
index 00000000..890dd302
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["SnapshotCreateParams"]
+
+
+class SnapshotCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """A human-readable name for the volume snapshot."""
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+ """
diff --git a/src/gradient/types/gpu_droplets/volumes/snapshot_create_response.py b/src/gradient/types/gpu_droplets/volumes/snapshot_create_response.py
new file mode 100644
index 00000000..41701795
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/snapshot_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...shared.snapshots import Snapshots
+
+__all__ = ["SnapshotCreateResponse"]
+
+
+class SnapshotCreateResponse(BaseModel):
+ snapshot: Optional[Snapshots] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/snapshot_list_params.py b/src/gradient/types/gpu_droplets/volumes/snapshot_list_params.py
new file mode 100644
index 00000000..65221a79
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/snapshot_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["SnapshotListParams"]
+
+
+class SnapshotListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/gpu_droplets/volumes/snapshot_list_response.py b/src/gradient/types/gpu_droplets/volumes/snapshot_list_response.py
new file mode 100644
index 00000000..25d91ed2
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/snapshot_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.snapshots import Snapshots
+from ...shared.page_links import PageLinks
+from ...shared.meta_properties import MetaProperties
+
+__all__ = ["SnapshotListResponse"]
+
+
+class SnapshotListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ links: Optional[PageLinks] = None
+
+ snapshots: Optional[List[Snapshots]] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/gradient/types/gpu_droplets/volumes/snapshot_retrieve_response.py
new file mode 100644
index 00000000..3defa47d
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/snapshot_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...shared.snapshots import Snapshots
+
+__all__ = ["SnapshotRetrieveResponse"]
+
+
+class SnapshotRetrieveResponse(BaseModel):
+ snapshot: Optional[Snapshots] = None
diff --git a/src/gradient/types/gpu_droplets/volumes/volume_action.py b/src/gradient/types/gpu_droplets/volumes/volume_action.py
new file mode 100644
index 00000000..e1c01f6c
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/volumes/volume_action.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...shared.action import Action
+
+__all__ = ["VolumeAction"]
+
+
+class VolumeAction(Action):
+ resource_id: Optional[int] = None # type: ignore
+
+ type: Optional[str] = None # type: ignore
+ """This is the type of action that the object represents.
+
+ For example, this could be "attach_volume" to represent the state of a volume
+ attach action.
+ """
diff --git a/src/gradient/types/image_generate_params.py b/src/gradient/types/image_generate_params.py
new file mode 100644
index 00000000..42e6144a
--- /dev/null
+++ b/src/gradient/types/image_generate_params.py
@@ -0,0 +1,100 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ImageGenerateParamsBase", "ImageGenerateParamsNonStreaming", "ImageGenerateParamsStreaming"]
+
+
+class ImageGenerateParamsBase(TypedDict, total=False):
+ prompt: Required[str]
+ """A text description of the desired image(s).
+
+ GPT-IMAGE-1 supports up to 32,000 characters and provides automatic prompt
+ optimization for best results.
+ """
+
+ background: Optional[str]
+ """The background setting for the image generation.
+
+ GPT-IMAGE-1 supports: transparent, opaque, auto.
+ """
+
+ model: str
+ """The model to use for image generation.
+
+ GPT-IMAGE-1 is the latest model offering the best quality with automatic
+ optimization and enhanced capabilities.
+ """
+
+ moderation: Optional[str]
+ """The moderation setting for the image generation.
+
+ GPT-IMAGE-1 supports: low, auto.
+ """
+
+ n: Optional[int]
+ """The number of images to generate. GPT-IMAGE-1 only supports n=1."""
+
+ output_compression: Optional[int]
+ """The output compression for the image generation. GPT-IMAGE-1 supports: 0-100."""
+
+ output_format: Optional[str]
+ """The output format for the image generation.
+
+ GPT-IMAGE-1 supports: png, webp, jpeg.
+ """
+
+ partial_images: Optional[int]
+ """The number of partial image chunks to return during streaming generation.
+
+ This parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+ """
+
+ quality: Optional[str]
+ """The quality of the image that will be generated.
+
+ GPT-IMAGE-1 supports: auto (automatically select best quality), high, medium,
+ low.
+ """
+
+ size: Optional[str]
+ """The size of the generated images.
+
+ GPT-IMAGE-1 supports: auto (automatically select best size), 1536x1024
+ (landscape), 1024x1536 (portrait).
+ """
+
+ user: Optional[str]
+ """
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+ """
+
+
+class ImageGenerateParamsNonStreaming(ImageGenerateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+ """
+
+
+class ImageGenerateParamsStreaming(ImageGenerateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+ """
+
+
+ImageGenerateParams = Union[ImageGenerateParamsNonStreaming, ImageGenerateParamsStreaming]
diff --git a/src/gradient/types/image_generate_response.py b/src/gradient/types/image_generate_response.py
new file mode 100644
index 00000000..324e6038
--- /dev/null
+++ b/src/gradient/types/image_generate_response.py
@@ -0,0 +1,71 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["ImageGenerateResponse", "Data", "Usage", "UsageInputTokensDetails"]
+
+
+class Data(BaseModel):
+ """Represents the content of a generated image from GPT-IMAGE-1"""
+
+ b64_json: str
+ """The base64-encoded JSON of the generated image.
+
+ GPT-IMAGE-1 returns images in b64_json format only.
+ """
+
+ revised_prompt: Optional[str] = None
+ """The optimized prompt that was used to generate the image.
+
+ GPT-IMAGE-1 automatically optimizes prompts for best results.
+ """
+
+
+class UsageInputTokensDetails(BaseModel):
+ """Detailed breakdown of input tokens"""
+
+ text_tokens: Optional[int] = None
+ """Number of text tokens in the input"""
+
+
+class Usage(BaseModel):
+ """Usage statistics for the image generation request"""
+
+ input_tokens: int
+ """Number of tokens in the input prompt"""
+
+ total_tokens: int
+ """Total number of tokens used (input + output)"""
+
+ input_tokens_details: Optional[UsageInputTokensDetails] = None
+ """Detailed breakdown of input tokens"""
+
+ output_tokens: Optional[int] = None
+ """Number of tokens in the generated output"""
+
+
+class ImageGenerateResponse(BaseModel):
+ """The response from the image generation endpoint"""
+
+ created: int
+ """The Unix timestamp (in seconds) of when the images were created"""
+
+ data: List[Data]
+ """The list of generated images"""
+
+ background: Optional[str] = None
+ """The background setting used for the image generation"""
+
+ output_format: Optional[str] = None
+ """The output format of the generated image"""
+
+ quality: Optional[str] = None
+ """The quality setting used for the image generation"""
+
+ size: Optional[str] = None
+ """The size of the generated image"""
+
+ usage: Optional[Usage] = None
+ """Usage statistics for the image generation request"""
diff --git a/src/gradient/types/inference/__init__.py b/src/gradient/types/inference/__init__.py
new file mode 100644
index 00000000..c3cbcd6d
--- /dev/null
+++ b/src/gradient/types/inference/__init__.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .api_key_list_params import APIKeyListParams as APIKeyListParams
+from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams
+from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
+from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams
+from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo
+from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse
+from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse
+from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse
+from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse
diff --git a/src/gradient/types/inference/api_key_create_params.py b/src/gradient/types/inference/api_key_create_params.py
new file mode 100644
index 00000000..10edfbbe
--- /dev/null
+++ b/src/gradient/types/inference/api_key_create_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APIKeyCreateParams"]
+
+
+class APIKeyCreateParams(TypedDict, total=False):
+ name: str
+ """A human friendly name to identify the key"""
diff --git a/src/gradient/types/inference/api_key_create_response.py b/src/gradient/types/inference/api_key_create_response.py
new file mode 100644
index 00000000..f2469e43
--- /dev/null
+++ b/src/gradient/types/inference/api_key_create_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_model_api_key_info import APIModelAPIKeyInfo
+
+__all__ = ["APIKeyCreateResponse"]
+
+
+class APIKeyCreateResponse(BaseModel):
+ api_key_info: Optional[APIModelAPIKeyInfo] = None
+ """Model API Key Info"""
diff --git a/src/gradient/types/inference/api_key_delete_response.py b/src/gradient/types/inference/api_key_delete_response.py
new file mode 100644
index 00000000..89102258
--- /dev/null
+++ b/src/gradient/types/inference/api_key_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_model_api_key_info import APIModelAPIKeyInfo
+
+__all__ = ["APIKeyDeleteResponse"]
+
+
+class APIKeyDeleteResponse(BaseModel):
+ api_key_info: Optional[APIModelAPIKeyInfo] = None
+ """Model API Key Info"""
diff --git a/src/gradient/types/inference/api_key_list_params.py b/src/gradient/types/inference/api_key_list_params.py
new file mode 100644
index 00000000..1f8f96b7
--- /dev/null
+++ b/src/gradient/types/inference/api_key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APIKeyListParams"]
+
+
+class APIKeyListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/inference/api_key_list_response.py b/src/gradient/types/inference/api_key_list_response.py
new file mode 100644
index 00000000..7c474873
--- /dev/null
+++ b/src/gradient/types/inference/api_key_list_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
+from .api_model_api_key_info import APIModelAPIKeyInfo
+
+__all__ = ["APIKeyListResponse"]
+
+
+class APIKeyListResponse(BaseModel):
+ api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/inference/api_key_update_params.py b/src/gradient/types/inference/api_key_update_params.py
new file mode 100644
index 00000000..7f79240a
--- /dev/null
+++ b/src/gradient/types/inference/api_key_update_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["APIKeyUpdateParams"]
+
+
+class APIKeyUpdateParams(TypedDict, total=False):
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name"""
diff --git a/src/gradient/types/inference/api_key_update_regenerate_response.py b/src/gradient/types/inference/api_key_update_regenerate_response.py
new file mode 100644
index 00000000..c7ce5f0a
--- /dev/null
+++ b/src/gradient/types/inference/api_key_update_regenerate_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_model_api_key_info import APIModelAPIKeyInfo
+
+__all__ = ["APIKeyUpdateRegenerateResponse"]
+
+
+class APIKeyUpdateRegenerateResponse(BaseModel):
+ api_key_info: Optional[APIModelAPIKeyInfo] = None
+ """Model API Key Info"""
diff --git a/src/gradient/types/inference/api_key_update_response.py b/src/gradient/types/inference/api_key_update_response.py
new file mode 100644
index 00000000..1b7f92ef
--- /dev/null
+++ b/src/gradient/types/inference/api_key_update_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_model_api_key_info import APIModelAPIKeyInfo
+
+__all__ = ["APIKeyUpdateResponse"]
+
+
+class APIKeyUpdateResponse(BaseModel):
+ api_key_info: Optional[APIModelAPIKeyInfo] = None
+ """Model API Key Info"""
diff --git a/src/gradient/types/inference/api_model_api_key_info.py b/src/gradient/types/inference/api_model_api_key_info.py
new file mode 100644
index 00000000..28f96839
--- /dev/null
+++ b/src/gradient/types/inference/api_model_api_key_info.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["APIModelAPIKeyInfo"]
+
+
+class APIModelAPIKeyInfo(BaseModel):
+ """Model API Key Info"""
+
+ created_at: Optional[datetime] = None
+ """Creation date"""
+
+ created_by: Optional[str] = None
+ """Created by"""
+
+ deleted_at: Optional[datetime] = None
+ """Deleted date"""
+
+ name: Optional[str] = None
+ """Name"""
+
+ secret_key: Optional[str] = None
+
+ uuid: Optional[str] = None
+ """Uuid"""
diff --git a/src/gradient/types/knowledge_base_create_params.py b/src/gradient/types/knowledge_base_create_params.py
new file mode 100644
index 00000000..2c7bece1
--- /dev/null
+++ b/src/gradient/types/knowledge_base_create_params.py
@@ -0,0 +1,151 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, TypedDict
+
+from .._types import SequenceNotStr
+from .knowledge_bases.aws_data_source_param import AwsDataSourceParam
+from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam
+from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam
+from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
+
+__all__ = [
+ "KnowledgeBaseCreateParams",
+ "Datasource",
+ "DatasourceChunkingOptions",
+ "DatasourceDropboxDataSource",
+ "DatasourceGoogleDriveDataSource",
+]
+
+
+class KnowledgeBaseCreateParams(TypedDict, total=False):
+ database_id: str
+ """
+ Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+ optional. If not provided, we create a new database for the knowledge base in
+ the same region as the knowledge base.
+ """
+
+ datasources: Iterable[Datasource]
+ """The data sources to use for this knowledge base.
+
+ See
+ [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+ for more information on data sources best practices.
+ """
+
+ embedding_model_uuid: str
+ """
+ Identifier for the
+ [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+ """
+
+ name: str
+ """Name of the knowledge base."""
+
+ project_id: str
+ """Identifier of the DigitalOcean project this knowledge base will belong to."""
+
+ region: str
+ """The datacenter region to deploy the knowledge base in."""
+
+ tags: SequenceNotStr[str]
+ """Tags to organize your knowledge base."""
+
+ vpc_uuid: str
+ """The VPC to deploy the knowledge base database in"""
+
+
+class DatasourceChunkingOptions(TypedDict, total=False):
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature preview flag.**
+ """
+
+ child_chunk_size: int
+ """Hierarchical options"""
+
+ max_chunk_size: int
+ """Section_Based and Fixed_Length options"""
+
+ parent_chunk_size: int
+ """Hierarchical options"""
+
+ semantic_threshold: float
+ """Semantic options"""
+
+
+class DatasourceDropboxDataSource(TypedDict, total=False):
+ """Dropbox Data Source"""
+
+ folder: str
+
+ refresh_token: str
+ """Refresh token.
+
+ you can obtain a refresh token by following the oauth2 flow. see
+ /v2/gen-ai/oauth2/dropbox/tokens for reference.
+ """
+
+
+class DatasourceGoogleDriveDataSource(TypedDict, total=False):
+ """Google Drive Data Source"""
+
+ folder_id: str
+
+ refresh_token: str
+ """Refresh token.
+
+ you can obtain a refresh token by following the oauth2 flow. see
+ /v2/gen-ai/oauth2/google/tokens for reference.
+ """
+
+
+class Datasource(TypedDict, total=False):
+ aws_data_source: AwsDataSourceParam
+ """AWS S3 Data Source"""
+
+ bucket_name: str
+ """Deprecated, moved to data_source_details"""
+
+ bucket_region: str
+ """Deprecated, moved to data_source_details"""
+
+ chunking_algorithm: Literal[
+ "CHUNKING_ALGORITHM_UNKNOWN",
+ "CHUNKING_ALGORITHM_SECTION_BASED",
+ "CHUNKING_ALGORITHM_HIERARCHICAL",
+ "CHUNKING_ALGORITHM_SEMANTIC",
+ "CHUNKING_ALGORITHM_FIXED_LENGTH",
+ ]
+ """The chunking algorithm to use for processing data sources.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ chunking_options: DatasourceChunkingOptions
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ dropbox_data_source: DatasourceDropboxDataSource
+ """Dropbox Data Source"""
+
+ file_upload_data_source: APIFileUploadDataSourceParam
+ """File to upload as data source for knowledge base."""
+
+ google_drive_data_source: DatasourceGoogleDriveDataSource
+ """Google Drive Data Source"""
+
+ item_path: str
+
+ spaces_data_source: APISpacesDataSourceParam
+ """Spaces Bucket Data Source"""
+
+ web_crawler_data_source: APIWebCrawlerDataSourceParam
+ """WebCrawlerDataSource"""
diff --git a/src/gradient/types/knowledge_base_create_response.py b/src/gradient/types/knowledge_base_create_response.py
new file mode 100644
index 00000000..72f0b58c
--- /dev/null
+++ b/src/gradient/types/knowledge_base_create_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseCreateResponse"]
+
+
+class KnowledgeBaseCreateResponse(BaseModel):
+ """Information about a newly created knowledge base"""
+
+ knowledge_base: Optional[APIKnowledgeBase] = None
+ """Knowledgebase Description"""
diff --git a/src/gradient/types/knowledge_base_delete_response.py b/src/gradient/types/knowledge_base_delete_response.py
new file mode 100644
index 00000000..cdf154ba
--- /dev/null
+++ b/src/gradient/types/knowledge_base_delete_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["KnowledgeBaseDeleteResponse"]
+
+
+class KnowledgeBaseDeleteResponse(BaseModel):
+ """Information about a deleted knowledge base"""
+
+ uuid: Optional[str] = None
+ """The id of the deleted knowledge base"""
diff --git a/src/gradient/types/knowledge_base_list_indexing_jobs_response.py b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
new file mode 100644
index 00000000..f5376c61
--- /dev/null
+++ b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
+from .knowledge_bases.api_indexing_job import APIIndexingJob
+
+__all__ = ["KnowledgeBaseListIndexingJobsResponse"]
+
+
+class KnowledgeBaseListIndexingJobsResponse(BaseModel):
+ """Indexing jobs"""
+
+ jobs: Optional[List[APIIndexingJob]] = None
+ """The indexing jobs"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/knowledge_base_list_params.py b/src/gradient/types/knowledge_base_list_params.py
new file mode 100644
index 00000000..b2c0eb31
--- /dev/null
+++ b/src/gradient/types/knowledge_base_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KnowledgeBaseListParams"]
+
+
+class KnowledgeBaseListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/knowledge_base_list_response.py b/src/gradient/types/knowledge_base_list_response.py
new file mode 100644
index 00000000..3231f643
--- /dev/null
+++ b/src/gradient/types/knowledge_base_list_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseListResponse"]
+
+
+class KnowledgeBaseListResponse(BaseModel):
+ """List of knowledge bases"""
+
+ knowledge_bases: Optional[List[APIKnowledgeBase]] = None
+ """The knowledge bases"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/knowledge_base_retrieve_response.py b/src/gradient/types/knowledge_base_retrieve_response.py
new file mode 100644
index 00000000..712f858c
--- /dev/null
+++ b/src/gradient/types/knowledge_base_retrieve_response.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseRetrieveResponse"]
+
+
+class KnowledgeBaseRetrieveResponse(BaseModel):
+ """The knowledge base"""
+
+ database_status: Optional[
+ Literal[
+ "CREATING",
+ "ONLINE",
+ "POWEROFF",
+ "REBUILDING",
+ "REBALANCING",
+ "DECOMMISSIONED",
+ "FORKING",
+ "MIGRATING",
+ "RESIZING",
+ "RESTORING",
+ "POWERING_ON",
+ "UNHEALTHY",
+ ]
+ ] = None
+
+ knowledge_base: Optional[APIKnowledgeBase] = None
+ """Knowledgebase Description"""
diff --git a/src/gradient/types/knowledge_base_update_params.py b/src/gradient/types/knowledge_base_update_params.py
new file mode 100644
index 00000000..cfb52016
--- /dev/null
+++ b/src/gradient/types/knowledge_base_update_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from .._types import SequenceNotStr
+from .._utils import PropertyInfo
+
+__all__ = ["KnowledgeBaseUpdateParams"]
+
+
+class KnowledgeBaseUpdateParams(TypedDict, total=False):
+ database_id: str
+ """The id of the DigitalOcean database this knowledge base will use, optiona."""
+
+ embedding_model_uuid: str
+ """Identifier for the foundation model."""
+
+ name: str
+ """Knowledge base name"""
+
+ project_id: str
+ """The id of the DigitalOcean project this knowledge base will belong to"""
+
+ tags: SequenceNotStr[str]
+ """Tags to organize your knowledge base."""
+
+ body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
+ """Knowledge base id"""
diff --git a/src/gradient/types/knowledge_base_update_response.py b/src/gradient/types/knowledge_base_update_response.py
new file mode 100644
index 00000000..0e4ff539
--- /dev/null
+++ b/src/gradient/types/knowledge_base_update_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseUpdateResponse"]
+
+
+class KnowledgeBaseUpdateResponse(BaseModel):
+ """Information about an updated knowledge base"""
+
+ knowledge_base: Optional[APIKnowledgeBase] = None
+ """Knowledgebase Description"""
diff --git a/src/gradient/types/knowledge_bases/__init__.py b/src/gradient/types/knowledge_bases/__init__.py
new file mode 100644
index 00000000..e7b88be8
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/__init__.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .api_indexing_job import APIIndexingJob as APIIndexingJob
+from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam
+from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource
+from .api_indexed_data_source import APIIndexedDataSource as APIIndexedDataSource
+from .data_source_list_params import DataSourceListParams as DataSourceListParams
+from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
+from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams
+from .data_source_list_response import DataSourceListResponse as DataSourceListResponse
+from .data_source_update_params import DataSourceUpdateParams as DataSourceUpdateParams
+from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
+from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
+from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource
+from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource
+from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse
+from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse
+from .data_source_update_response import DataSourceUpdateResponse as DataSourceUpdateResponse
+from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam
+from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
+from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource
+from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
+from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam
+from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam
+from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
+from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
+from .data_source_create_presigned_urls_params import (
+ DataSourceCreatePresignedURLsParams as DataSourceCreatePresignedURLsParams,
+)
+from .indexing_job_retrieve_signed_url_response import (
+ IndexingJobRetrieveSignedURLResponse as IndexingJobRetrieveSignedURLResponse,
+)
+from .data_source_create_presigned_urls_response import (
+ DataSourceCreatePresignedURLsResponse as DataSourceCreatePresignedURLsResponse,
+)
+from .indexing_job_retrieve_data_sources_response import (
+ IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
+)
diff --git a/src/gradient/types/knowledge_bases/api_file_upload_data_source.py b/src/gradient/types/knowledge_bases/api_file_upload_data_source.py
new file mode 100644
index 00000000..6aaeb0e7
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_file_upload_data_source.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APIFileUploadDataSource"]
+
+
+class APIFileUploadDataSource(BaseModel):
+ """File to upload as data source for knowledge base."""
+
+ original_file_name: Optional[str] = None
+ """The original file name"""
+
+ size_in_bytes: Optional[str] = None
+ """The size of the file in bytes"""
+
+ stored_object_key: Optional[str] = None
+ """The object key the file was stored as"""
diff --git a/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py
new file mode 100644
index 00000000..3cdd34ee
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APIFileUploadDataSourceParam"]
+
+
+class APIFileUploadDataSourceParam(TypedDict, total=False):
+ """File to upload as data source for knowledge base."""
+
+ original_file_name: str
+ """The original file name"""
+
+ size_in_bytes: str
+ """The size of the file in bytes"""
+
+ stored_object_key: str
+ """The object key the file was stored as"""
diff --git a/src/gradient/types/knowledge_bases/api_indexed_data_source.py b/src/gradient/types/knowledge_bases/api_indexed_data_source.py
new file mode 100644
index 00000000..3f011582
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_indexed_data_source.py
@@ -0,0 +1,62 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["APIIndexedDataSource"]
+
+
+class APIIndexedDataSource(BaseModel):
+ completed_at: Optional[datetime] = None
+ """Timestamp when data source completed indexing"""
+
+ data_source_uuid: Optional[str] = None
+ """Uuid of the indexed data source"""
+
+ error_details: Optional[str] = None
+ """A detailed error description"""
+
+ error_msg: Optional[str] = None
+ """A string code provinding a hint which part of the system experienced an error"""
+
+ failed_item_count: Optional[str] = None
+ """Total count of files that have failed"""
+
+ indexed_file_count: Optional[str] = None
+ """Total count of files that have been indexed"""
+
+ indexed_item_count: Optional[str] = None
+ """Total count of files that have been indexed"""
+
+ removed_item_count: Optional[str] = None
+ """Total count of files that have been removed"""
+
+ skipped_item_count: Optional[str] = None
+ """Total count of files that have been skipped"""
+
+ started_at: Optional[datetime] = None
+ """Timestamp when data source started indexing"""
+
+ status: Optional[
+ Literal[
+ "DATA_SOURCE_STATUS_UNKNOWN",
+ "DATA_SOURCE_STATUS_IN_PROGRESS",
+ "DATA_SOURCE_STATUS_UPDATED",
+ "DATA_SOURCE_STATUS_PARTIALLY_UPDATED",
+ "DATA_SOURCE_STATUS_NOT_UPDATED",
+ "DATA_SOURCE_STATUS_FAILED",
+ "DATA_SOURCE_STATUS_CANCELLED",
+ ]
+ ] = None
+
+ total_bytes: Optional[str] = None
+ """Total size of files in data source in bytes"""
+
+ total_bytes_indexed: Optional[str] = None
+ """Total size of files in data source in bytes that have been indexed"""
+
+ total_file_count: Optional[str] = None
+ """Total file count in the data source"""
diff --git a/src/gradient/types/knowledge_bases/api_indexing_job.py b/src/gradient/types/knowledge_bases/api_indexing_job.py
new file mode 100644
index 00000000..d43ddd6e
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_indexing_job.py
@@ -0,0 +1,75 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .api_indexed_data_source import APIIndexedDataSource
+
+__all__ = ["APIIndexingJob"]
+
+
+class APIIndexingJob(BaseModel):
+ """IndexingJob description"""
+
+ completed_datasources: Optional[int] = None
+ """Number of datasources indexed completed"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ data_source_jobs: Optional[List[APIIndexedDataSource]] = None
+ """Details on Data Sources included in the Indexing Job"""
+
+ data_source_uuids: Optional[List[str]] = None
+
+ finished_at: Optional[datetime] = None
+
+ is_report_available: Optional[bool] = None
+ """Boolean value to determine if the indexing job details are available"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base id"""
+
+ phase: Optional[
+ Literal[
+ "BATCH_JOB_PHASE_UNKNOWN",
+ "BATCH_JOB_PHASE_PENDING",
+ "BATCH_JOB_PHASE_RUNNING",
+ "BATCH_JOB_PHASE_SUCCEEDED",
+ "BATCH_JOB_PHASE_FAILED",
+ "BATCH_JOB_PHASE_ERROR",
+ "BATCH_JOB_PHASE_CANCELLED",
+ ]
+ ] = None
+
+ started_at: Optional[datetime] = None
+
+ status: Optional[
+ Literal[
+ "INDEX_JOB_STATUS_UNKNOWN",
+ "INDEX_JOB_STATUS_PARTIAL",
+ "INDEX_JOB_STATUS_IN_PROGRESS",
+ "INDEX_JOB_STATUS_COMPLETED",
+ "INDEX_JOB_STATUS_FAILED",
+ "INDEX_JOB_STATUS_NO_CHANGES",
+ "INDEX_JOB_STATUS_PENDING",
+ "INDEX_JOB_STATUS_CANCELLED",
+ ]
+ ] = None
+
+ tokens: Optional[int] = None
+ """Number of tokens [This field is deprecated]"""
+
+ total_datasources: Optional[int] = None
+ """Number of datasources being indexed"""
+
+ total_tokens: Optional[str] = None
+ """Total Tokens Consumed By the Indexing Job"""
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
diff --git a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
new file mode 100644
index 00000000..ef5db1a5
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -0,0 +1,129 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .api_spaces_data_source import APISpacesDataSource
+from .api_indexed_data_source import APIIndexedDataSource
+from .api_file_upload_data_source import APIFileUploadDataSource
+from .api_web_crawler_data_source import APIWebCrawlerDataSource
+
+__all__ = [
+ "APIKnowledgeBaseDataSource",
+ "AwsDataSource",
+ "ChunkingOptions",
+ "DropboxDataSource",
+ "GoogleDriveDataSource",
+]
+
+
+class AwsDataSource(BaseModel):
+ """AWS S3 Data Source for Display"""
+
+ bucket_name: Optional[str] = None
+ """Spaces bucket name"""
+
+ item_path: Optional[str] = None
+
+ region: Optional[str] = None
+ """Region of bucket"""
+
+
+class ChunkingOptions(BaseModel):
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature preview flag.**
+ """
+
+ child_chunk_size: Optional[int] = None
+ """Hierarchical options"""
+
+ max_chunk_size: Optional[int] = None
+ """Section_Based and Fixed_Length options"""
+
+ parent_chunk_size: Optional[int] = None
+ """Hierarchical options"""
+
+ semantic_threshold: Optional[float] = None
+ """Semantic options"""
+
+
+class DropboxDataSource(BaseModel):
+ """Dropbox Data Source for Display"""
+
+ folder: Optional[str] = None
+
+
+class GoogleDriveDataSource(BaseModel):
+ """Google Drive Data Source for Display"""
+
+ folder_id: Optional[str] = None
+
+ folder_name: Optional[str] = None
+ """Name of the selected folder if available"""
+
+
+class APIKnowledgeBaseDataSource(BaseModel):
+ """Data Source configuration for Knowledge Bases"""
+
+ aws_data_source: Optional[AwsDataSource] = None
+ """AWS S3 Data Source for Display"""
+
+ bucket_name: Optional[str] = None
+ """Name of storage bucket - Deprecated, moved to data_source_details"""
+
+ chunking_algorithm: Optional[
+ Literal[
+ "CHUNKING_ALGORITHM_UNKNOWN",
+ "CHUNKING_ALGORITHM_SECTION_BASED",
+ "CHUNKING_ALGORITHM_HIERARCHICAL",
+ "CHUNKING_ALGORITHM_SEMANTIC",
+ "CHUNKING_ALGORITHM_FIXED_LENGTH",
+ ]
+ ] = None
+ """The chunking algorithm to use for processing data sources.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ chunking_options: Optional[ChunkingOptions] = None
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ dropbox_data_source: Optional[DropboxDataSource] = None
+ """Dropbox Data Source for Display"""
+
+ file_upload_data_source: Optional[APIFileUploadDataSource] = None
+ """File to upload as data source for knowledge base."""
+
+ google_drive_data_source: Optional[GoogleDriveDataSource] = None
+ """Google Drive Data Source for Display"""
+
+ item_path: Optional[str] = None
+ """Path of folder or object in bucket - Deprecated, moved to data_source_details"""
+
+ last_datasource_indexing_job: Optional[APIIndexedDataSource] = None
+
+ region: Optional[str] = None
+ """Region code - Deprecated, moved to data_source_details"""
+
+ spaces_data_source: Optional[APISpacesDataSource] = None
+ """Spaces Bucket Data Source"""
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ uuid: Optional[str] = None
+ """Unique id of knowledge base"""
+
+ web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None
+ """WebCrawlerDataSource"""
diff --git a/src/gradient/types/knowledge_bases/api_spaces_data_source.py b/src/gradient/types/knowledge_bases/api_spaces_data_source.py
new file mode 100644
index 00000000..2ac76d69
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_spaces_data_source.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APISpacesDataSource"]
+
+
+class APISpacesDataSource(BaseModel):
+ """Spaces Bucket Data Source"""
+
+ bucket_name: Optional[str] = None
+ """Spaces bucket name"""
+
+ item_path: Optional[str] = None
+
+ region: Optional[str] = None
+ """Region of bucket"""
diff --git a/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py
new file mode 100644
index 00000000..9c3daf03
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APISpacesDataSourceParam"]
+
+
+class APISpacesDataSourceParam(TypedDict, total=False):
+ """Spaces Bucket Data Source"""
+
+ bucket_name: str
+ """Spaces bucket name"""
+
+ item_path: str
+
+ region: str
+ """Region of bucket"""
diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
new file mode 100644
index 00000000..ba1ee81f
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["APIWebCrawlerDataSource"]
+
+
+class APIWebCrawlerDataSource(BaseModel):
+ """WebCrawlerDataSource"""
+
+ base_url: Optional[str] = None
+ """The base url to crawl."""
+
+ crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS", "SITEMAP"]] = None
+ """Options for specifying how URLs found on pages should be handled.
+
+ - UNKNOWN: Default unknown value
+ - SCOPED: Only include the base URL.
+ - PATH: Crawl the base URL and linked pages within the URL path.
+ - DOMAIN: Crawl the base URL and linked pages within the same domain.
+ - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain.
+ - SITEMAP: Crawl URLs discovered in the sitemap.
+ """
+
+ embed_media: Optional[bool] = None
+ """Whether to ingest and index media (images, etc.) on web pages."""
+
+ exclude_tags: Optional[List[str]] = None
+ """Declaring which tags to exclude in web pages while webcrawling"""
diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
new file mode 100644
index 00000000..ff4f3307
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["APIWebCrawlerDataSourceParam"]
+
+
+class APIWebCrawlerDataSourceParam(TypedDict, total=False):
+ """WebCrawlerDataSource"""
+
+ base_url: str
+ """The base url to crawl."""
+
+ crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS", "SITEMAP"]
+ """Options for specifying how URLs found on pages should be handled.
+
+ - UNKNOWN: Default unknown value
+ - SCOPED: Only include the base URL.
+ - PATH: Crawl the base URL and linked pages within the URL path.
+ - DOMAIN: Crawl the base URL and linked pages within the same domain.
+ - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain.
+ - SITEMAP: Crawl URLs discovered in the sitemap.
+ """
+
+ embed_media: bool
+ """Whether to ingest and index media (images, etc.) on web pages."""
+
+ exclude_tags: SequenceNotStr[str]
+ """Declaring which tags to exclude in web pages while webcrawling"""
diff --git a/src/gradient/types/knowledge_bases/aws_data_source_param.py b/src/gradient/types/knowledge_bases/aws_data_source_param.py
new file mode 100644
index 00000000..fa99a8c1
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/aws_data_source_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AwsDataSourceParam"]
+
+
+class AwsDataSourceParam(TypedDict, total=False):
+ """AWS S3 Data Source"""
+
+ bucket_name: str
+ """Spaces bucket name"""
+
+ item_path: str
+
+ key_id: str
+ """The AWS Key ID"""
+
+ region: str
+ """Region of bucket"""
+
+ secret_key: str
+ """The AWS Secret Key"""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_params.py b/src/gradient/types/knowledge_bases/data_source_create_params.py
new file mode 100644
index 00000000..bc65e42a
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_params.py
@@ -0,0 +1,65 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+from .aws_data_source_param import AwsDataSourceParam
+from .api_spaces_data_source_param import APISpacesDataSourceParam
+from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
+
+__all__ = ["DataSourceCreateParams", "ChunkingOptions"]
+
+
+class DataSourceCreateParams(TypedDict, total=False):
+ aws_data_source: AwsDataSourceParam
+ """AWS S3 Data Source"""
+
+ chunking_algorithm: Literal[
+ "CHUNKING_ALGORITHM_UNKNOWN",
+ "CHUNKING_ALGORITHM_SECTION_BASED",
+ "CHUNKING_ALGORITHM_HIERARCHICAL",
+ "CHUNKING_ALGORITHM_SEMANTIC",
+ "CHUNKING_ALGORITHM_FIXED_LENGTH",
+ ]
+ """The chunking algorithm to use for processing data sources.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ chunking_options: ChunkingOptions
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")]
+ """Knowledge base id"""
+
+ spaces_data_source: APISpacesDataSourceParam
+ """Spaces Bucket Data Source"""
+
+ web_crawler_data_source: APIWebCrawlerDataSourceParam
+ """WebCrawlerDataSource"""
+
+
+class ChunkingOptions(TypedDict, total=False):
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature preview flag.**
+ """
+
+ child_chunk_size: int
+ """Hierarchical options"""
+
+ max_chunk_size: int
+ """Section_Based and Fixed_Length options"""
+
+ parent_chunk_size: int
+ """Hierarchical options"""
+
+ semantic_threshold: float
+ """Semantic options"""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
new file mode 100644
index 00000000..1d27f0ca
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+__all__ = ["DataSourceCreatePresignedURLsParams", "File"]
+
+
+class DataSourceCreatePresignedURLsParams(TypedDict, total=False):
+ files: Iterable[File]
+ """A list of files to generate presigned URLs for."""
+
+
+class File(TypedDict, total=False):
+ """A single file’s metadata in the request."""
+
+ file_name: str
+ """Local filename"""
+
+ file_size: str
+ """The size of the file in bytes."""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
new file mode 100644
index 00000000..daca9865
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["DataSourceCreatePresignedURLsResponse", "Upload"]
+
+
+class Upload(BaseModel):
+ """Detailed info about each presigned URL returned to the client."""
+
+ expires_at: Optional[datetime] = None
+ """The time the url expires at."""
+
+ object_key: Optional[str] = None
+ """The unique object key to store the file as."""
+
+ original_file_name: Optional[str] = None
+ """The original file name."""
+
+ presigned_url: Optional[str] = None
+ """The actual presigned URL the client can use to upload the file directly."""
+
+
+class DataSourceCreatePresignedURLsResponse(BaseModel):
+ """Response with pre-signed urls to upload files."""
+
+ request_id: Optional[str] = None
+ """The ID generated for the request for Presigned URLs."""
+
+ uploads: Optional[List[Upload]] = None
+ """A list of generated presigned URLs and object keys, one per file."""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_response.py b/src/gradient/types/knowledge_bases/data_source_create_response.py
new file mode 100644
index 00000000..da49d870
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
+
+__all__ = ["DataSourceCreateResponse"]
+
+
+class DataSourceCreateResponse(BaseModel):
+ """Information about a newly created knowldege base data source"""
+
+ knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None
+ """Data Source configuration for Knowledge Bases"""
diff --git a/src/gradient/types/knowledge_bases/data_source_delete_response.py b/src/gradient/types/knowledge_bases/data_source_delete_response.py
new file mode 100644
index 00000000..fc3e59da
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_delete_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["DataSourceDeleteResponse"]
+
+
+class DataSourceDeleteResponse(BaseModel):
+ """Information about a newly deleted knowledge base data source"""
+
+ data_source_uuid: Optional[str] = None
+ """Data source id"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base id"""
diff --git a/src/gradient/types/knowledge_bases/data_source_list_params.py b/src/gradient/types/knowledge_bases/data_source_list_params.py
new file mode 100644
index 00000000..089eb291
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["DataSourceListParams"]
+
+
+class DataSourceListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/knowledge_bases/data_source_list_response.py b/src/gradient/types/knowledge_bases/data_source_list_response.py
new file mode 100644
index 00000000..5de5d372
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_list_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
+from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
+
+__all__ = ["DataSourceListResponse"]
+
+
+class DataSourceListResponse(BaseModel):
+ """A list of knowledge base data sources"""
+
+ knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None
+ """The data sources"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/knowledge_bases/data_source_update_params.py b/src/gradient/types/knowledge_bases/data_source_update_params.py
new file mode 100644
index 00000000..ffcdf95b
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_update_params.py
@@ -0,0 +1,58 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["DataSourceUpdateParams", "ChunkingOptions"]
+
+
+class DataSourceUpdateParams(TypedDict, total=False):
+ path_knowledge_base_uuid: Required[Annotated[str, PropertyInfo(alias="knowledge_base_uuid")]]
+
+ chunking_algorithm: Literal[
+ "CHUNKING_ALGORITHM_UNKNOWN",
+ "CHUNKING_ALGORITHM_SECTION_BASED",
+ "CHUNKING_ALGORITHM_HIERARCHICAL",
+ "CHUNKING_ALGORITHM_SEMANTIC",
+ "CHUNKING_ALGORITHM_FIXED_LENGTH",
+ ]
+ """The chunking algorithm to use for processing data sources.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ chunking_options: ChunkingOptions
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature
+ preview flag.**
+ """
+
+ body_data_source_uuid: Annotated[str, PropertyInfo(alias="data_source_uuid")]
+ """Data Source ID (Path Parameter)"""
+
+ body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")]
+ """Knowledge Base ID (Path Parameter)"""
+
+
+class ChunkingOptions(TypedDict, total=False):
+ """Configuration options for the chunking algorithm.
+
+ **Note: This feature requires enabling the knowledgebase enhancements feature preview flag.**
+ """
+
+ child_chunk_size: int
+ """Hierarchical options"""
+
+ max_chunk_size: int
+ """Section_Based and Fixed_Length options"""
+
+ parent_chunk_size: int
+ """Hierarchical options"""
+
+ semantic_threshold: float
+ """Semantic options"""
diff --git a/src/gradient/types/knowledge_bases/data_source_update_response.py b/src/gradient/types/knowledge_bases/data_source_update_response.py
new file mode 100644
index 00000000..31484137
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_update_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
+
+__all__ = ["DataSourceUpdateResponse"]
+
+
+class DataSourceUpdateResponse(BaseModel):
+ """
+ Update a data source of a knowledge base with a change in chunking algorithm/options
+ """
+
+ knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None
+ """Data Source configuration for Knowledge Bases"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_create_params.py b/src/gradient/types/knowledge_bases/indexing_job_create_params.py
new file mode 100644
index 00000000..ebd8632b
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_create_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["IndexingJobCreateParams"]
+
+
+class IndexingJobCreateParams(TypedDict, total=False):
+ data_source_uuids: SequenceNotStr[str]
+ """
+ List of data source ids to index; if none are provided, all data sources
+ will be indexed
+ """
+
+ knowledge_base_uuid: str
+ """Knowledge base id"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_create_response.py b/src/gradient/types/knowledge_bases/indexing_job_create_response.py
new file mode 100644
index 00000000..df7e6911
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_create_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_indexing_job import APIIndexingJob
+
+__all__ = ["IndexingJobCreateResponse"]
+
+
+class IndexingJobCreateResponse(BaseModel):
+ """StartKnowledgeBaseIndexingJobOutput description"""
+
+ job: Optional[APIIndexingJob] = None
+ """IndexingJob description"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_list_params.py b/src/gradient/types/knowledge_bases/indexing_job_list_params.py
new file mode 100644
index 00000000..c9ac560e
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["IndexingJobListParams"]
+
+
+class IndexingJobListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_list_response.py b/src/gradient/types/knowledge_bases/indexing_job_list_response.py
new file mode 100644
index 00000000..374533df
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_list_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..shared.api_meta import APIMeta
+from .api_indexing_job import APIIndexingJob
+from ..shared.api_links import APILinks
+
+__all__ = ["IndexingJobListResponse"]
+
+
+class IndexingJobListResponse(BaseModel):
+ """Indexing jobs"""
+
+ jobs: Optional[List[APIIndexingJob]] = None
+ """The indexing jobs"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
new file mode 100644
index 00000000..dd0e317e
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .api_indexed_data_source import APIIndexedDataSource
+
+__all__ = ["IndexingJobRetrieveDataSourcesResponse"]
+
+
+class IndexingJobRetrieveDataSourcesResponse(BaseModel):
+ indexed_data_sources: Optional[List[APIIndexedDataSource]] = None
diff --git a/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py
new file mode 100644
index 00000000..ea24de65
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_indexing_job import APIIndexingJob
+
+__all__ = ["IndexingJobRetrieveResponse"]
+
+
+class IndexingJobRetrieveResponse(BaseModel):
+ """GetKnowledgeBaseIndexingJobOutput description"""
+
+ job: Optional[APIIndexingJob] = None
+ """IndexingJob description"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py
new file mode 100644
index 00000000..2ef60e45
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["IndexingJobRetrieveSignedURLResponse"]
+
+
+class IndexingJobRetrieveSignedURLResponse(BaseModel):
+ signed_url: Optional[str] = None
+ """The signed url for downloading the indexing job details"""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_params.py
new file mode 100644
index 00000000..9359a42a
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["IndexingJobUpdateCancelParams"]
+
+
+class IndexingJobUpdateCancelParams(TypedDict, total=False):
+ body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
+ """A unique identifier for an indexing job."""
diff --git a/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py
new file mode 100644
index 00000000..2622779b
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_indexing_job import APIIndexingJob
+
+__all__ = ["IndexingJobUpdateCancelResponse"]
+
+
+class IndexingJobUpdateCancelResponse(BaseModel):
+ """CancelKnowledgeBaseIndexingJobOutput description"""
+
+ job: Optional[APIIndexingJob] = None
+ """IndexingJob description"""
diff --git a/src/gradient/types/model_list_params.py b/src/gradient/types/model_list_params.py
new file mode 100644
index 00000000..a2fa066a
--- /dev/null
+++ b/src/gradient/types/model_list_params.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ModelListParams"]
+
+
+class ModelListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
+
+ public_only: bool
+ """Only include models that are publicly available."""
+
+ usecases: List[
+ Literal[
+ "MODEL_USECASE_UNKNOWN",
+ "MODEL_USECASE_AGENT",
+ "MODEL_USECASE_FINETUNED",
+ "MODEL_USECASE_KNOWLEDGEBASE",
+ "MODEL_USECASE_GUARDRAIL",
+ "MODEL_USECASE_REASONING",
+ "MODEL_USECASE_SERVERLESS",
+ ]
+ ]
+ """Include only models defined for the listed usecases.
+
+ - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+ - MODEL_USECASE_AGENT: The model maybe used in an agent
+ - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
+ - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
+ (embedding models)
+ - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
+ - MODEL_USECASE_REASONING: The model usecase for reasoning
+ - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
+ """
diff --git a/src/gradient/types/model_list_response.py b/src/gradient/types/model_list_response.py
new file mode 100644
index 00000000..48e17809
--- /dev/null
+++ b/src/gradient/types/model_list_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .api_model import APIModel
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
+
+__all__ = ["ModelListResponse"]
+
+
+class ModelListResponse(BaseModel):
+ """A list of models"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+ models: Optional[List[APIModel]] = None
+ """The models"""
diff --git a/src/gradient/types/models/__init__.py b/src/gradient/types/models/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/src/gradient/types/models/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/src/gradient/types/models/providers/__init__.py b/src/gradient/types/models/providers/__init__.py
new file mode 100644
index 00000000..74366e70
--- /dev/null
+++ b/src/gradient/types/models/providers/__init__.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .openai_list_params import OpenAIListParams as OpenAIListParams
+from .openai_create_params import OpenAICreateParams as OpenAICreateParams
+from .openai_list_response import OpenAIListResponse as OpenAIListResponse
+from .openai_update_params import OpenAIUpdateParams as OpenAIUpdateParams
+from .anthropic_list_params import AnthropicListParams as AnthropicListParams
+from .openai_create_response import OpenAICreateResponse as OpenAICreateResponse
+from .openai_delete_response import OpenAIDeleteResponse as OpenAIDeleteResponse
+from .openai_update_response import OpenAIUpdateResponse as OpenAIUpdateResponse
+from .anthropic_create_params import AnthropicCreateParams as AnthropicCreateParams
+from .anthropic_list_response import AnthropicListResponse as AnthropicListResponse
+from .anthropic_update_params import AnthropicUpdateParams as AnthropicUpdateParams
+from .openai_retrieve_response import OpenAIRetrieveResponse as OpenAIRetrieveResponse
+from .anthropic_create_response import AnthropicCreateResponse as AnthropicCreateResponse
+from .anthropic_delete_response import AnthropicDeleteResponse as AnthropicDeleteResponse
+from .anthropic_update_response import AnthropicUpdateResponse as AnthropicUpdateResponse
+from .anthropic_retrieve_response import AnthropicRetrieveResponse as AnthropicRetrieveResponse
+from .anthropic_list_agents_params import AnthropicListAgentsParams as AnthropicListAgentsParams
+from .openai_retrieve_agents_params import OpenAIRetrieveAgentsParams as OpenAIRetrieveAgentsParams
+from .anthropic_list_agents_response import AnthropicListAgentsResponse as AnthropicListAgentsResponse
+from .openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse as OpenAIRetrieveAgentsResponse
diff --git a/src/gradient/types/models/providers/anthropic_create_params.py b/src/gradient/types/models/providers/anthropic_create_params.py
new file mode 100644
index 00000000..c9fd6e85
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AnthropicCreateParams"]
+
+
+class AnthropicCreateParams(TypedDict, total=False):
+ api_key: str
+ """Anthropic API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/gradient/types/models/providers/anthropic_create_response.py b/src/gradient/types/models/providers/anthropic_create_response.py
new file mode 100644
index 00000000..0609a486
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_create_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicCreateResponse"]
+
+
+class AnthropicCreateResponse(BaseModel):
+ """
+ CreateAnthropicAPIKeyOutput is used to return the newly created Anthropic API key.
+ """
+
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/models/providers/anthropic_delete_response.py b/src/gradient/types/models/providers/anthropic_delete_response.py
new file mode 100644
index 00000000..3ad6a9c6
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicDeleteResponse"]
+
+
+class AnthropicDeleteResponse(BaseModel):
+ """DeleteAnthropicAPIKeyOutput is used to return the deleted Anthropic API key."""
+
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/models/providers/anthropic_list_agents_params.py b/src/gradient/types/models/providers/anthropic_list_agents_params.py
new file mode 100644
index 00000000..b3308b69
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AnthropicListAgentsParams"]
+
+
+class AnthropicListAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/models/providers/anthropic_list_agents_response.py b/src/gradient/types/models/providers/anthropic_list_agents_response.py
new file mode 100644
index 00000000..8e2186cb
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_list_agents_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+
+__all__ = ["AnthropicListAgentsResponse"]
+
+
+class AnthropicListAgentsResponse(BaseModel):
+    """List of Agents that are linked to a specific Anthropic Key"""
+
+    agents: Optional[List["APIAgent"]] = None  # "APIAgent" is a forward reference, resolved by the deferred import below
+
+    links: Optional[APILinks] = None
+    """Links to other pages"""
+
+    meta: Optional[APIMeta] = None
+    """Meta information about the data set"""
+
+
+from ...api_agent import APIAgent  # NOTE(review): deferred to module end — presumably avoids a circular import with api_agent; confirm
diff --git a/src/gradient/types/models/providers/anthropic_list_params.py b/src/gradient/types/models/providers/anthropic_list_params.py
new file mode 100644
index 00000000..ae1cca58
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AnthropicListParams"]
+
+
+class AnthropicListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/models/providers/anthropic_list_response.py b/src/gradient/types/models/providers/anthropic_list_response.py
new file mode 100644
index 00000000..458bd311
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_list_response.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicListResponse"]
+
+
+class AnthropicListResponse(BaseModel):
+    """
+    ListAnthropicAPIKeysOutput is used to return the list of Anthropic API keys for a specific agent.
+    """
+
+    api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
+    """List of Anthropic API key info objects."""
+
+    links: Optional[APILinks] = None
+    """Links to other pages"""
+
+    meta: Optional[APIMeta] = None
+    """Meta information about the data set"""
diff --git a/src/gradient/types/models/providers/anthropic_retrieve_response.py b/src/gradient/types/models/providers/anthropic_retrieve_response.py
new file mode 100644
index 00000000..61324b7d
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicRetrieveResponse"]
+
+
+class AnthropicRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/models/providers/anthropic_update_params.py b/src/gradient/types/models/providers/anthropic_update_params.py
new file mode 100644
index 00000000..865dc29c
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["AnthropicUpdateParams"]
+
+
+class AnthropicUpdateParams(TypedDict, total=False):
+    api_key: str
+    """Anthropic API key"""
+
+    body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+    """API key ID (sent in the request body as `api_key_uuid`)."""
+
+    name: str
+    """Name of the key"""
diff --git a/src/gradient/types/models/providers/anthropic_update_response.py b/src/gradient/types/models/providers/anthropic_update_response.py
new file mode 100644
index 00000000..3e24273c
--- /dev/null
+++ b/src/gradient/types/models/providers/anthropic_update_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicUpdateResponse"]
+
+
+class AnthropicUpdateResponse(BaseModel):
+ """UpdateAnthropicAPIKeyOutput is used to return the updated Anthropic API key."""
+
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/gradient/types/models/providers/openai_create_params.py b/src/gradient/types/models/providers/openai_create_params.py
new file mode 100644
index 00000000..8ed7f571
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["OpenAICreateParams"]
+
+
+class OpenAICreateParams(TypedDict, total=False):
+ api_key: str
+ """OpenAI API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/gradient/types/models/providers/openai_create_response.py b/src/gradient/types/models/providers/openai_create_response.py
new file mode 100644
index 00000000..16aff373
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_create_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAICreateResponse"]
+
+
+class OpenAICreateResponse(BaseModel):
+ """CreateOpenAIAPIKeyOutput is used to return the newly created OpenAI API key."""
+
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/models/providers/openai_delete_response.py b/src/gradient/types/models/providers/openai_delete_response.py
new file mode 100644
index 00000000..d73a681e
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIDeleteResponse"]
+
+
+class OpenAIDeleteResponse(BaseModel):
+ """DeleteOpenAIAPIKeyOutput is used to return the deleted OpenAI API key."""
+
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/models/providers/openai_list_params.py b/src/gradient/types/models/providers/openai_list_params.py
new file mode 100644
index 00000000..5677eeaf
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["OpenAIListParams"]
+
+
+class OpenAIListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/models/providers/openai_list_response.py b/src/gradient/types/models/providers/openai_list_response.py
new file mode 100644
index 00000000..825ac890
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_list_response.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIListResponse"]
+
+
+class OpenAIListResponse(BaseModel):
+    """
+    ListOpenAIAPIKeysOutput is used to return the list of OpenAI API keys for a specific agent.
+    """
+
+    api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
+    """List of OpenAI API key info objects."""
+
+    links: Optional[APILinks] = None
+    """Links to other pages"""
+
+    meta: Optional[APIMeta] = None
+    """Meta information about the data set"""
diff --git a/src/gradient/types/models/providers/openai_retrieve_agents_params.py b/src/gradient/types/models/providers/openai_retrieve_agents_params.py
new file mode 100644
index 00000000..2db6d7a1
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_retrieve_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["OpenAIRetrieveAgentsParams"]
+
+
+class OpenAIRetrieveAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/gradient/types/models/providers/openai_retrieve_agents_response.py b/src/gradient/types/models/providers/openai_retrieve_agents_response.py
new file mode 100644
index 00000000..f2266f8f
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_retrieve_agents_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+
+__all__ = ["OpenAIRetrieveAgentsResponse"]
+
+
+class OpenAIRetrieveAgentsResponse(BaseModel):
+    """List of Agents that are linked to a specific OpenAI Key"""
+
+    agents: Optional[List["APIAgent"]] = None  # "APIAgent" is a forward reference, resolved by the deferred import below
+
+    links: Optional[APILinks] = None
+    """Links to other pages"""
+
+    meta: Optional[APIMeta] = None
+    """Meta information about the data set"""
+
+
+from ...api_agent import APIAgent  # NOTE(review): deferred to module end — presumably avoids a circular import with api_agent; confirm
diff --git a/src/gradient/types/models/providers/openai_retrieve_response.py b/src/gradient/types/models/providers/openai_retrieve_response.py
new file mode 100644
index 00000000..0f382073
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIRetrieveResponse"]
+
+
+class OpenAIRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/models/providers/openai_update_params.py b/src/gradient/types/models/providers/openai_update_params.py
new file mode 100644
index 00000000..9b99495e
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["OpenAIUpdateParams"]
+
+
+class OpenAIUpdateParams(TypedDict, total=False):
+    api_key: str
+    """OpenAI API key"""
+
+    body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+    """API key ID (sent in the request body as `api_key_uuid`)."""
+
+    name: str
+    """Name of the key"""
diff --git a/src/gradient/types/models/providers/openai_update_response.py b/src/gradient/types/models/providers/openai_update_response.py
new file mode 100644
index 00000000..b94a8efe
--- /dev/null
+++ b/src/gradient/types/models/providers/openai_update_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIUpdateResponse"]
+
+
+class OpenAIUpdateResponse(BaseModel):
+ """UpdateOpenAIAPIKeyOutput is used to return the updated OpenAI API key."""
+
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/gradient/types/nf_create_params.py b/src/gradient/types/nf_create_params.py
new file mode 100644
index 00000000..fab12a16
--- /dev/null
+++ b/src/gradient/types/nf_create_params.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["NfCreateParams"]
+
+
+class NfCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """The human-readable name of the share."""
+
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: Required[int]
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ vpc_ids: Required[SequenceNotStr[str]]
+ """List of VPC IDs that should be able to access the share."""
+
+ performance_tier: str
+ """The performance tier of the share."""
diff --git a/src/gradient/types/nf_create_response.py b/src/gradient/types/nf_create_response.py
new file mode 100644
index 00000000..5016d776
--- /dev/null
+++ b/src/gradient/types/nf_create_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfCreateResponse", "Share"]
+
+
+class Share(BaseModel):
+ id: str
+ """The unique identifier of the NFS share."""
+
+ created_at: datetime
+ """Timestamp for when the NFS share was created."""
+
+ name: str
+ """The human-readable name of the share."""
+
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: int
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the share."""
+
+ host: Optional[str] = None
+ """The host IP of the NFS server that will be accessible from the associated VPC"""
+
+ mount_path: Optional[str] = None
+ """
+ Path at which the share will be available, to be mounted at a target of the
+ user's choice within the client
+ """
+
+ vpc_ids: Optional[List[str]] = None
+ """List of VPC IDs that should be able to access the share."""
+
+
+class NfCreateResponse(BaseModel):
+ share: Optional[Share] = None
diff --git a/src/gradient/types/nf_delete_params.py b/src/gradient/types/nf_delete_params.py
new file mode 100644
index 00000000..c507a370
--- /dev/null
+++ b/src/gradient/types/nf_delete_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["NfDeleteParams"]
+
+
+class NfDeleteParams(TypedDict, total=False):
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_initiate_action_params.py b/src/gradient/types/nf_initiate_action_params.py
new file mode 100644
index 00000000..1080d816
--- /dev/null
+++ b/src/gradient/types/nf_initiate_action_params.py
@@ -0,0 +1,106 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "NfInitiateActionParams",
+    "NfsActionResize",
+    "NfsActionResizeParams",
+    "NfsActionSnapshot",
+    "NfsActionSnapshotParams",
+    "NfsActionAttach",
+    "NfsActionAttachParams",
+    "NfsActionDetach",
+    "NfsActionDetachParams",
+    "NfsActionSwitchPerformanceTier",
+    "NfsActionSwitchPerformanceTierParams",
+]
+
+
+class NfsActionResize(TypedDict, total=False):
+    type: Required[Literal["resize", "snapshot"]]
+    """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+    params: NfsActionResizeParams
+
+    region: str
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides."""
+
+
+class NfsActionResizeParams(TypedDict, total=False):
+    size_gib: Required[int]
+    """The new size for the NFS share."""
+
+
+class NfsActionSnapshot(TypedDict, total=False):
+    type: Required[Literal["resize", "snapshot"]]
+    """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+    params: NfsActionSnapshotParams
+
+    region: str
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides."""
+
+
+class NfsActionSnapshotParams(TypedDict, total=False):
+    name: Required[str]
+    """Snapshot name of the NFS share"""
+
+
+class NfsActionAttach(TypedDict, total=False):
+    # NOTE(review): this Literal only admits "resize"/"snapshot", so an attach action cannot express an attach-specific type value — looks like an upstream OpenAPI enum gap; confirm the wire values before widening.
+    type: Required[Literal["resize", "snapshot"]]
+    """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+    params: NfsActionAttachParams
+
+    region: str
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides."""
+
+
+class NfsActionAttachParams(TypedDict, total=False):
+    vpc_id: Required[str]
+    """The ID of the VPC to which the NFS share will be attached"""
+
+
+class NfsActionDetach(TypedDict, total=False):
+    # NOTE(review): this Literal only admits "resize"/"snapshot", so a detach action cannot express a detach-specific type value — looks like an upstream OpenAPI enum gap; confirm the wire values before widening.
+    type: Required[Literal["resize", "snapshot"]]
+    """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+    params: NfsActionDetachParams
+
+    region: str
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides."""
+
+
+class NfsActionDetachParams(TypedDict, total=False):
+    vpc_id: Required[str]
+    """The ID of the VPC from which the NFS share will be detached"""
+
+
+class NfsActionSwitchPerformanceTier(TypedDict, total=False):
+    # NOTE(review): this Literal only admits "resize"/"snapshot", so a switch-performance-tier action cannot express its own type value — looks like an upstream OpenAPI enum gap; confirm the wire values before widening.
+    type: Required[Literal["resize", "snapshot"]]
+    """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+    params: NfsActionSwitchPerformanceTierParams
+
+    region: str
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides."""
+
+
+class NfsActionSwitchPerformanceTierParams(TypedDict, total=False):
+    performance_tier: Required[str]
+    """
+    The performance tier to which the NFS share will be switched (e.g., standard,
+    high).
+    """
+
+
+NfInitiateActionParams: TypeAlias = Union[
+    NfsActionResize, NfsActionSnapshot, NfsActionAttach, NfsActionDetach, NfsActionSwitchPerformanceTier
+]
diff --git a/src/gradient/types/nf_initiate_action_response.py b/src/gradient/types/nf_initiate_action_response.py
new file mode 100644
index 00000000..58618450
--- /dev/null
+++ b/src/gradient/types/nf_initiate_action_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfInitiateActionResponse", "Action"]
+
+
+class Action(BaseModel):
+ """The action that was submitted."""
+
+ region_slug: str
+ """The DigitalOcean region slug where the resource is located."""
+
+ resource_id: str
+ """The unique identifier of the resource on which the action is being performed."""
+
+ resource_type: Literal["network_file_share", "network_file_share_snapshot"]
+ """The type of resource on which the action is being performed."""
+
+ started_at: datetime
+ """The timestamp when the action was started."""
+
+ status: Literal["in-progress", "completed", "errored"]
+ """The current status of the action."""
+
+ type: str
+ """The type of action being performed."""
+
+
+class NfInitiateActionResponse(BaseModel):
+ """Action response of an NFS share."""
+
+ action: Action
+ """The action that was submitted."""
diff --git a/src/gradient/types/nf_list_params.py b/src/gradient/types/nf_list_params.py
new file mode 100644
index 00000000..52b4d96d
--- /dev/null
+++ b/src/gradient/types/nf_list_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["NfListParams"]
+
+
+class NfListParams(TypedDict, total=False):
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_list_response.py b/src/gradient/types/nf_list_response.py
new file mode 100644
index 00000000..c5af118b
--- /dev/null
+++ b/src/gradient/types/nf_list_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfListResponse", "Share"]
+
+
+class Share(BaseModel):
+ id: str
+ """The unique identifier of the NFS share."""
+
+ created_at: datetime
+ """Timestamp for when the NFS share was created."""
+
+ name: str
+ """The human-readable name of the share."""
+
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: int
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the share."""
+
+ host: Optional[str] = None
+ """The host IP of the NFS server that will be accessible from the associated VPC"""
+
+ mount_path: Optional[str] = None
+ """
+ Path at which the share will be available, to be mounted at a target of the
+ user's choice within the client
+ """
+
+ vpc_ids: Optional[List[str]] = None
+ """List of VPC IDs that should be able to access the share."""
+
+
+class NfListResponse(BaseModel):
+ shares: Optional[List[Share]] = None
diff --git a/src/gradient/types/nf_retrieve_params.py b/src/gradient/types/nf_retrieve_params.py
new file mode 100644
index 00000000..6d7ba724
--- /dev/null
+++ b/src/gradient/types/nf_retrieve_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["NfRetrieveParams"]
+
+
+class NfRetrieveParams(TypedDict, total=False):
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_retrieve_response.py b/src/gradient/types/nf_retrieve_response.py
new file mode 100644
index 00000000..897f07f0
--- /dev/null
+++ b/src/gradient/types/nf_retrieve_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfRetrieveResponse", "Share"]
+
+
+class Share(BaseModel):
+ id: str
+ """The unique identifier of the NFS share."""
+
+ created_at: datetime
+ """Timestamp for when the NFS share was created."""
+
+ name: str
+ """The human-readable name of the share."""
+
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: int
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the share."""
+
+ host: Optional[str] = None
+ """The host IP of the NFS server that will be accessible from the associated VPC"""
+
+ mount_path: Optional[str] = None
+ """
+ Path at which the share will be available, to be mounted at a target of the
+ user's choice within the client
+ """
+
+ vpc_ids: Optional[List[str]] = None
+ """List of VPC IDs that should be able to access the share."""
+
+
+class NfRetrieveResponse(BaseModel):
+ share: Optional[Share] = None
diff --git a/src/gradient/types/nfs/__init__.py b/src/gradient/types/nfs/__init__.py
new file mode 100644
index 00000000..41777980
--- /dev/null
+++ b/src/gradient/types/nfs/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .snapshot_list_params import SnapshotListParams as SnapshotListParams
+from .snapshot_delete_params import SnapshotDeleteParams as SnapshotDeleteParams
+from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse
+from .snapshot_retrieve_params import SnapshotRetrieveParams as SnapshotRetrieveParams
+from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse
diff --git a/src/gradient/types/nfs/snapshot_delete_params.py b/src/gradient/types/nfs/snapshot_delete_params.py
new file mode 100644
index 00000000..844da45e
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_delete_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["SnapshotDeleteParams"]
+
+
+class SnapshotDeleteParams(TypedDict, total=False):
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nfs/snapshot_list_params.py b/src/gradient/types/nfs/snapshot_list_params.py
new file mode 100644
index 00000000..64f9543b
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_list_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["SnapshotListParams"]
+
+
+class SnapshotListParams(TypedDict, total=False):
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ share_id: str
+ """The unique ID of an NFS share.
+
+ If provided, only snapshots of this specific share will be returned.
+ """
diff --git a/src/gradient/types/nfs/snapshot_list_response.py b/src/gradient/types/nfs/snapshot_list_response.py
new file mode 100644
index 00000000..a46342ce
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_list_response.py
@@ -0,0 +1,38 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SnapshotListResponse", "Snapshot"]
+
+
+class Snapshot(BaseModel):
+ """Represents an NFS snapshot."""
+
+ id: str
+ """The unique identifier of the snapshot."""
+
+ created_at: datetime
+ """The timestamp when the snapshot was created."""
+
+ name: str
+ """The human-readable name of the snapshot."""
+
+ region: str
+ """The DigitalOcean region slug where the snapshot is located."""
+
+ share_id: str
+ """The unique identifier of the share from which this snapshot was created."""
+
+ size_gib: int
+ """The size of the snapshot in GiB."""
+
+ status: Literal["UNKNOWN", "CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the snapshot."""
+
+
+class SnapshotListResponse(BaseModel):
+ snapshots: Optional[List[Snapshot]] = None
diff --git a/src/gradient/types/nfs/snapshot_retrieve_params.py b/src/gradient/types/nfs/snapshot_retrieve_params.py
new file mode 100644
index 00000000..2c3fcda5
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_retrieve_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["SnapshotRetrieveParams"]
+
+
+class SnapshotRetrieveParams(TypedDict, total=False):
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nfs/snapshot_retrieve_response.py b/src/gradient/types/nfs/snapshot_retrieve_response.py
new file mode 100644
index 00000000..12a69d0a
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_retrieve_response.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SnapshotRetrieveResponse", "Snapshot"]
+
+
+class Snapshot(BaseModel):
+ """Represents an NFS snapshot."""
+
+ id: str
+ """The unique identifier of the snapshot."""
+
+ created_at: datetime
+ """The timestamp when the snapshot was created."""
+
+ name: str
+ """The human-readable name of the snapshot."""
+
+ region: str
+ """The DigitalOcean region slug where the snapshot is located."""
+
+ share_id: str
+ """The unique identifier of the share from which this snapshot was created."""
+
+ size_gib: int
+ """The size of the snapshot in GiB."""
+
+ status: Literal["UNKNOWN", "CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the snapshot."""
+
+
+class SnapshotRetrieveResponse(BaseModel):
+ snapshot: Optional[Snapshot] = None
+ """Represents an NFS snapshot."""
diff --git a/src/gradient/types/region_list_params.py b/src/gradient/types/region_list_params.py
new file mode 100644
index 00000000..4fef37b3
--- /dev/null
+++ b/src/gradient/types/region_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["RegionListParams"]
+
+
+class RegionListParams(TypedDict, total=False):
+ page: int
+ """Which 'page' of paginated results to return."""
+
+ per_page: int
+ """Number of items returned per page"""
diff --git a/src/gradient/types/region_list_response.py b/src/gradient/types/region_list_response.py
new file mode 100644
index 00000000..f1bf4c69
--- /dev/null
+++ b/src/gradient/types/region_list_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.region import Region
+from .shared.page_links import PageLinks
+from .shared.meta_properties import MetaProperties
+
+__all__ = ["RegionListResponse"]
+
+
+class RegionListResponse(BaseModel):
+ meta: MetaProperties
+ """Information about the response itself."""
+
+ regions: List[Region]
+
+ links: Optional[PageLinks] = None
diff --git a/src/gradient/types/response_create_params.py b/src/gradient/types/response_create_params.py
new file mode 100644
index 00000000..42716bb3
--- /dev/null
+++ b/src/gradient/types/response_create_params.py
@@ -0,0 +1,323 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = [
+ "ResponseCreateParamsBase",
+ "InputUnionMember1",
+ "InputUnionMember1UnionMember0",
+ "InputUnionMember1UnionMember0Content",
+ "InputUnionMember1UnionMember1",
+ "InputUnionMember1UnionMember1ContentUnionMember1",
+ "InputUnionMember1UnionMember1ContentUnionMember1UnionMember0",
+ "InputUnionMember1UnionMember1ToolCall",
+ "InputUnionMember1UnionMember1ToolCallFunction",
+ "StreamOptions",
+ "ToolChoice",
+ "ToolChoiceChatCompletionNamedToolChoice",
+ "ToolChoiceChatCompletionNamedToolChoiceFunction",
+ "Tool",
+ "ResponseCreateParamsNonStreaming",
+ "ResponseCreateParamsStreaming",
+]
+
+
+class ResponseCreateParamsBase(TypedDict, total=False):
+ input: Required[Union[str, Iterable[InputUnionMember1]]]
+ """The input text prompt or conversation history.
+
+ Can be a string or an array of message objects for conversation context.
+ """
+
+ model: Required[str]
+ """Model ID used to generate the response. Must be a VLLM model."""
+
+ instructions: Optional[str]
+ """System-level instructions for the model.
+
+ This sets the behavior and context for the response generation.
+ """
+
+ max_output_tokens: Optional[int]
+ """Maximum number of tokens to generate in the response.
+
+ If not specified, the model will use a default value.
+ """
+
+ max_tokens: Optional[int]
+ """The maximum number of tokens that can be generated in the completion.
+
+ Alias for max_output_tokens for compatibility.
+ """
+
+ metadata: Optional[Dict[str, str]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ modalities: Optional[List[Literal["text"]]]
+ """Specifies the output types the model should generate.
+
+ For text-to-text, this should be ["text"].
+ """
+
+ parallel_tool_calls: Optional[bool]
+ """Whether to enable parallel tool calls.
+
+ When true, the model can make multiple tool calls in parallel.
+ """
+
+ stop: Union[Optional[str], SequenceNotStr[str], None]
+ """Up to 4 sequences where the API will stop generating further tokens.
+
+ The returned text will not contain the stop sequence.
+ """
+
+ stream_options: Optional[StreamOptions]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ tool_choice: ToolChoice
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+ """
+
+ tools: Iterable[Tool]
+ """A list of tools the model may call.
+
+ Currently, only functions are supported as a tool. Uses Responses API format
+ (with `name`, `description`, `parameters` at top level).
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+ """
+
+
+class InputUnionMember1UnionMember0Content(TypedDict, total=False, extra_items=object): # type: ignore[call-arg]
+ text: str
+ """The reasoning text content"""
+
+ type: Literal["reasoning_text"]
+ """The type of content"""
+
+
+class InputUnionMember1UnionMember0(TypedDict, total=False, extra_items=object): # type: ignore[call-arg]
+ type: Required[Literal["function_call", "function_call_output", "reasoning"]]
+ """
+ The type of input item (must be function_call, function_call_output, or
+ reasoning)
+ """
+
+ id: str
+ """The unique ID of the reasoning item (optional for reasoning)"""
+
+ arguments: str
+ """JSON string of function arguments (required for function_call)"""
+
+ call_id: str
+ """The call ID (required for function_call and function_call_output)"""
+
+ content: Optional[Iterable[InputUnionMember1UnionMember0Content]]
+ """Array of reasoning content parts (optional for reasoning, can be null)"""
+
+ encrypted_content: Optional[str]
+ """Encrypted content (optional)"""
+
+ name: str
+ """The function name (required for function_call)"""
+
+ output: str
+ """JSON string of function output (required for function_call_output)"""
+
+ status: Optional[str]
+ """Status of the item (optional, can be null)"""
+
+ summary: Iterable[object]
+ """Summary of the reasoning (optional for reasoning)"""
+
+
+class InputUnionMember1UnionMember1ContentUnionMember1UnionMember0(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["input_text"]]
+ """The type of content part"""
+
+
+InputUnionMember1UnionMember1ContentUnionMember1: TypeAlias = Union[
+ InputUnionMember1UnionMember1ContentUnionMember1UnionMember0, Dict[str, object]
+]
+
+
+class InputUnionMember1UnionMember1ToolCallFunction(TypedDict, total=False):
+ """The function that the model called."""
+
+ arguments: Required[str]
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class InputUnionMember1UnionMember1ToolCall(TypedDict, total=False):
+ id: Required[str]
+ """The ID of the tool call."""
+
+ function: Required[InputUnionMember1UnionMember1ToolCallFunction]
+ """The function that the model called."""
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class InputUnionMember1UnionMember1(TypedDict, total=False, extra_items=object): # type: ignore[call-arg]
+ content: Required[Union[str, Iterable[InputUnionMember1UnionMember1ContentUnionMember1]]]
+ """The content of the message (string or content parts array)"""
+
+ role: Literal["user", "assistant", "system", "tool", "developer"]
+ """The role of the message author"""
+
+ tool_call_id: str
+ """Tool call ID that this message is responding to (required for tool role)"""
+
+ tool_calls: Iterable[InputUnionMember1UnionMember1ToolCall]
+ """Tool calls made by the assistant (for assistant role messages)"""
+
+ type: Literal["message"]
+ """Optional type identifier for message items (used by some clients like Codex)"""
+
+
+InputUnionMember1: TypeAlias = Union[InputUnionMember1UnionMember0, InputUnionMember1UnionMember1]
+
+
+class StreamOptions(TypedDict, total=False):
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ include_usage: bool
+ """If set, an additional chunk will be streamed before the `data: [DONE]` message.
+
+ The `usage` field on this chunk shows the token usage statistics for the entire
+ request, and the `choices` field will always be an empty array.
+
+ All other chunks will also include a `usage` field, but with a null value.
+ **NOTE:** If the stream is interrupted, you may not receive the final usage
+ chunk which contains the total token usage for the request.
+ """
+
+
+class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
+
+
+class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False):
+ """Specifies a tool the model should use.
+
+ Use to force the model to call a specific function.
+ """
+
+ function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction]
+
+ type: Required[Literal["function"]]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice]
+
+
+class Tool(TypedDict, total=False):
+ """Tool definition for Responses API (flat format).
+
+ This format is used by VLLM's Responses API where name, description, and parameters are at the top level of the tool object.
+ """
+
+ type: Required[Literal["function", "web_search", "web_search_2025_08_26"]]
+ """The type of the tool.
+
+ Supported values are `function` (custom tools), `web_search`, and
+ `web_search_2025_08_26` (built-in web search).
+ """
+
+ description: str
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ name: str
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ parameters: Dict[str, object]
+ """The parameters the functions accepts, described as a JSON Schema object.
+
+ See the [guide](/docs/guides/function-calling) for examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+
+class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+ """
+
+
+class ResponseCreateParamsStreaming(ResponseCreateParamsBase):
+ stream: Required[Literal[True]]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using server-sent events.
+ """
+
+
+ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming]
diff --git a/src/gradient/types/retrieve_documents_params.py b/src/gradient/types/retrieve_documents_params.py
new file mode 100644
index 00000000..359d8a07
--- /dev/null
+++ b/src/gradient/types/retrieve_documents_params.py
@@ -0,0 +1,75 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["RetrieveDocumentsParams", "Filters", "FiltersMust", "FiltersMustNot", "FiltersShould"]
+
+
+class RetrieveDocumentsParams(TypedDict, total=False):
+ num_results: Required[int]
+ """Number of results to return"""
+
+ query: Required[str]
+ """The search query text"""
+
+ alpha: float
+ """Weight for hybrid search (0-1):
+
+ - 0 = pure keyword search (BM25)
+ - 1 = pure vector search (default)
+ - 0.5 = balanced hybrid search
+ """
+
+ filters: Filters
+ """Metadata filters to apply to the search"""
+
+
+class FiltersMust(TypedDict, total=False):
+ field: Required[str]
+ """Metadata field name"""
+
+ operator: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "not_in", "contains"]]
+ """Comparison operator"""
+
+ value: Required[Union[str, float, bool, SequenceNotStr[str]]]
+ """Value to compare against (type depends on field)"""
+
+
+class FiltersMustNot(TypedDict, total=False):
+ field: Required[str]
+ """Metadata field name"""
+
+ operator: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "not_in", "contains"]]
+ """Comparison operator"""
+
+ value: Required[Union[str, float, bool, SequenceNotStr[str]]]
+ """Value to compare against (type depends on field)"""
+
+
+class FiltersShould(TypedDict, total=False):
+ field: Required[str]
+ """Metadata field name"""
+
+ operator: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "not_in", "contains"]]
+ """Comparison operator"""
+
+ value: Required[Union[str, float, bool, SequenceNotStr[str]]]
+ """Value to compare against (type depends on field)"""
+
+
+class Filters(TypedDict, total=False):
+ """Metadata filters to apply to the search"""
+
+ must: Iterable[FiltersMust]
+ """All conditions must match (AND)"""
+
+ must_not: Iterable[FiltersMustNot]
+ """No conditions should match (NOT)"""
+
+ should: Iterable[FiltersShould]
+ """At least one condition must match (OR)"""
diff --git a/src/gradient/types/retrieve_documents_response.py b/src/gradient/types/retrieve_documents_response.py
new file mode 100644
index 00000000..79f3e8eb
--- /dev/null
+++ b/src/gradient/types/retrieve_documents_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List
+
+from .._models import BaseModel
+
+__all__ = ["RetrieveDocumentsResponse", "Result"]
+
+
+class Result(BaseModel):
+ metadata: Dict[str, object]
+ """Metadata associated with the document"""
+
+ text_content: str
+ """The text content of the document chunk"""
+
+
+class RetrieveDocumentsResponse(BaseModel):
+ results: List[Result]
+ """Array of retrieved document chunks"""
+
+ total_results: int
+ """Number of results returned"""
diff --git a/src/gradient/types/shared/__init__.py b/src/gradient/types/shared/__init__.py
new file mode 100644
index 00000000..272092b7
--- /dev/null
+++ b/src/gradient/types/shared/__init__.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .size import Size as Size
+from .image import Image as Image
+from .action import Action as Action
+from .kernel import Kernel as Kernel
+from .region import Region as Region
+from .droplet import Droplet as Droplet
+from .api_meta import APIMeta as APIMeta
+from .gpu_info import GPUInfo as GPUInfo
+from .api_links import APILinks as APILinks
+from .disk_info import DiskInfo as DiskInfo
+from .snapshots import Snapshots as Snapshots
+from .network_v4 import NetworkV4 as NetworkV4
+from .network_v6 import NetworkV6 as NetworkV6
+from .page_links import PageLinks as PageLinks
+from .action_link import ActionLink as ActionLink
+from .vpc_peering import VpcPeering as VpcPeering
+from .subscription import Subscription as Subscription
+from .forward_links import ForwardLinks as ForwardLinks
+from .backward_links import BackwardLinks as BackwardLinks
+from .meta_properties import MetaProperties as MetaProperties
+from .completion_usage import CompletionUsage as CompletionUsage
+from .garbage_collection import GarbageCollection as GarbageCollection
+from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent
+from .subscription_tier_base import SubscriptionTierBase as SubscriptionTierBase
+from .create_response_response import CreateResponseResponse as CreateResponseResponse
+from .image_gen_completed_event import ImageGenCompletedEvent as ImageGenCompletedEvent
+from .droplet_next_backup_window import DropletNextBackupWindow as DropletNextBackupWindow
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
+from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent
+from .create_response_stream_response import CreateResponseStreamResponse as CreateResponseStreamResponse
diff --git a/src/gradient/types/shared/action.py b/src/gradient/types/shared/action.py
new file mode 100644
index 00000000..2b9fbf4e
--- /dev/null
+++ b/src/gradient/types/shared/action.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .region import Region
+from ..._models import BaseModel
+
+__all__ = ["Action"]
+
+
+class Action(BaseModel):
+ id: Optional[int] = None
+ """A unique numeric ID that can be used to identify and reference an action."""
+
+ completed_at: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the action was completed.
+ """
+
+ region: Optional[Region] = None
+
+ region_slug: Optional[str] = None
+ """A human-readable string that is used as a unique identifier for each region."""
+
+ resource_id: Optional[int] = None
+ """A unique identifier for the resource that the action is associated with."""
+
+ resource_type: Optional[str] = None
+ """The type of resource that the action is associated with."""
+
+ started_at: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the action was initiated.
+ """
+
+ status: Optional[Literal["in-progress", "completed", "errored"]] = None
+ """The current status of the action.
+
+ This can be "in-progress", "completed", or "errored".
+ """
+
+ type: Optional[str] = None
+ """This is the type of action that the object represents.
+
+ For example, this could be "transfer" to represent the state of an image
+ transfer action.
+ """
diff --git a/src/gradient/types/shared/action_link.py b/src/gradient/types/shared/action_link.py
new file mode 100644
index 00000000..143c66e3
--- /dev/null
+++ b/src/gradient/types/shared/action_link.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ActionLink"]
+
+
+class ActionLink(BaseModel):
+ """The linked actions can be used to check the status of a Droplet's create event."""
+
+ id: Optional[int] = None
+ """A unique numeric ID that can be used to identify and reference an action."""
+
+ href: Optional[str] = None
+ """A URL that can be used to access the action."""
+
+ rel: Optional[str] = None
+ """A string specifying the type of the related action."""
diff --git a/src/gradient/types/shared/api_links.py b/src/gradient/types/shared/api_links.py
new file mode 100644
index 00000000..ce9be06b
--- /dev/null
+++ b/src/gradient/types/shared/api_links.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APILinks", "Pages"]
+
+
+class Pages(BaseModel):
+ """Information about how to reach other pages"""
+
+ first: Optional[str] = None
+ """First page"""
+
+ last: Optional[str] = None
+ """Last page"""
+
+ next: Optional[str] = None
+ """Next page"""
+
+ previous: Optional[str] = None
+ """Previous page"""
+
+
+class APILinks(BaseModel):
+ """Links to other pages"""
+
+ pages: Optional[Pages] = None
+ """Information about how to reach other pages"""
diff --git a/src/gradient/types/shared/api_meta.py b/src/gradient/types/shared/api_meta.py
new file mode 100644
index 00000000..1a8cdede
--- /dev/null
+++ b/src/gradient/types/shared/api_meta.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APIMeta"]
+
+
+class APIMeta(BaseModel):
+ """Meta information about the data set"""
+
+ page: Optional[int] = None
+ """The current page"""
+
+ pages: Optional[int] = None
+ """Total number of pages"""
+
+ total: Optional[int] = None
+ """Total amount of items over all pages"""
diff --git a/src/gradient/types/shared/backward_links.py b/src/gradient/types/shared/backward_links.py
new file mode 100644
index 00000000..502fefef
--- /dev/null
+++ b/src/gradient/types/shared/backward_links.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["BackwardLinks"]
+
+
+class BackwardLinks(BaseModel):
+ first: Optional[str] = None
+ """URI of the first page of the results."""
+
+ prev: Optional[str] = None
+ """URI of the previous page of the results."""
diff --git a/src/gradient/types/shared/chat_completion_chunk.py b/src/gradient/types/shared/chat_completion_chunk.py
new file mode 100644
index 00000000..53d5a563
--- /dev/null
+++ b/src/gradient/types/shared/chat_completion_chunk.py
@@ -0,0 +1,132 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .completion_usage import CompletionUsage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+ "ChatCompletionChunk",
+ "Choice",
+ "ChoiceDelta",
+ "ChoiceDeltaToolCall",
+ "ChoiceDeltaToolCallFunction",
+ "ChoiceLogprobs",
+]
+
+
+class ChoiceDeltaToolCallFunction(BaseModel):
+ """A chunk of a function that the model called."""
+
+ arguments: Optional[str] = None
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: Optional[str] = None
+ """The name of the function to call."""
+
+
+class ChoiceDeltaToolCall(BaseModel):
+ index: int
+
+ id: Optional[str] = None
+ """The ID of the tool call."""
+
+ function: Optional[ChoiceDeltaToolCallFunction] = None
+ """A chunk of a function that the model called."""
+
+ type: Optional[Literal["function"]] = None
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceDelta(BaseModel):
+ """A chat completion delta generated by streamed model responses."""
+
+ content: Optional[str] = None
+ """The contents of the chunk message."""
+
+ reasoning_content: Optional[str] = None
+ """The reasoning content generated by the model."""
+
+ refusal: Optional[str] = None
+ """The refusal message generated by the model."""
+
+ role: Optional[Literal["developer", "user", "assistant"]] = None
+ """The role of the author of this message."""
+
+ tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
+
+
+class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice."""
+
+ content: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message content tokens with log probability information."""
+
+ refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+ delta: ChoiceDelta
+ """A chat completion delta generated by streamed model responses."""
+
+ finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter"]] = None
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, or `length` if the maximum number of tokens specified in the request
+ was reached, `tool_calls` if the model called a tool.
+ """
+
+ index: int
+ """The index of the choice in the list of choices."""
+
+ logprobs: Optional[ChoiceLogprobs] = None
+ """Log probability information for the choice."""
+
+
+class ChatCompletionChunk(BaseModel):
+ """
+ Represents a streamed chunk of a chat completion response returned
+ by the model, based on the provided input.
+ """
+
+ id: str
+ """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+ choices: List[Choice]
+ """A list of chat completion choices.
+
+    Can contain more than one element if `n` is greater than 1. Can also be empty
+ for the last chunk if you set `stream_options: {"include_usage": true}`.
+ """
+
+ created: int
+ """The Unix timestamp (in seconds) of when the chat completion was created.
+
+ Each chunk has the same timestamp.
+ """
+
+ model: str
+ """The model to generate the completion."""
+
+ object: Literal["chat.completion.chunk"]
+ """The object type, which is always `chat.completion.chunk`."""
+
+ usage: Optional[CompletionUsage] = None
+ """
+ An optional field that will only be present when you set
+ `stream_options: {"include_usage": true}` in your request. When present, it
+ contains a null value **except for the last chunk** which contains the token
+ usage statistics for the entire request.
+
+ **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+ final usage chunk which contains the total token usage for the request.
+ """
diff --git a/src/gradient/types/shared/chat_completion_token_logprob.py b/src/gradient/types/shared/chat_completion_token_logprob.py
new file mode 100644
index 00000000..c69e2589
--- /dev/null
+++ b/src/gradient/types/shared/chat_completion_token_logprob.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"]
+
+
+class TopLogprob(BaseModel):
+ token: str
+ """The token."""
+
+ bytes: Optional[List[int]] = None
+ """A list of integers representing the UTF-8 bytes representation of the token.
+
+ Useful in instances where characters are represented by multiple tokens and
+ their byte representations must be combined to generate the correct text
+ representation. Can be `null` if there is no bytes representation for the token.
+ """
+
+ logprob: float
+ """The log probability of this token, if it is within the top 20 most likely
+ tokens.
+
+ Otherwise, the value `-9999.0` is used to signify that the token is very
+ unlikely.
+ """
+
+
+class ChatCompletionTokenLogprob(BaseModel):
+ token: str
+ """The token."""
+
+ bytes: Optional[List[int]] = None
+ """A list of integers representing the UTF-8 bytes representation of the token.
+
+ Useful in instances where characters are represented by multiple tokens and
+ their byte representations must be combined to generate the correct text
+ representation. Can be `null` if there is no bytes representation for the token.
+ """
+
+ logprob: float
+ """The log probability of this token, if it is within the top 20 most likely
+ tokens.
+
+ Otherwise, the value `-9999.0` is used to signify that the token is very
+ unlikely.
+ """
+
+ top_logprobs: List[TopLogprob]
+ """List of the most likely tokens and their log probability, at this token
+ position.
+
+ In rare cases, there may be fewer than the number of requested `top_logprobs`
+ returned.
+ """
diff --git a/src/gradient/types/shared/completion_usage.py b/src/gradient/types/shared/completion_usage.py
new file mode 100644
index 00000000..596bd045
--- /dev/null
+++ b/src/gradient/types/shared/completion_usage.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["CompletionUsage", "CacheCreation"]
+
+
+class CacheCreation(BaseModel):
+ """Breakdown of prompt tokens written to cache."""
+
+ ephemeral_1h_input_tokens: int
+ """Number of prompt tokens written to 1h cache."""
+
+ ephemeral_5m_input_tokens: int
+ """Number of prompt tokens written to 5m cache."""
+
+
+class CompletionUsage(BaseModel):
+ """Usage statistics for the completion request."""
+
+ cache_created_input_tokens: int
+ """Number of prompt tokens written to cache."""
+
+ cache_creation: CacheCreation
+ """Breakdown of prompt tokens written to cache."""
+
+ cache_read_input_tokens: int
+ """Number of prompt tokens read from cache."""
+
+ completion_tokens: int
+ """Number of tokens in the generated completion."""
+
+ prompt_tokens: int
+ """Number of tokens in the prompt."""
+
+ total_tokens: int
+ """Total number of tokens used in the request (prompt + completion)."""
diff --git a/src/gradient/types/shared/create_response_response.py b/src/gradient/types/shared/create_response_response.py
new file mode 100644
index 00000000..61dbb155
--- /dev/null
+++ b/src/gradient/types/shared/create_response_response.py
@@ -0,0 +1,332 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import builtins
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+ "CreateResponseResponse",
+ "Usage",
+ "UsageInputTokensDetails",
+ "UsageOutputTokensDetails",
+ "Choice",
+ "ChoiceMessage",
+ "ChoiceMessageToolCall",
+ "ChoiceMessageToolCallFunction",
+ "ChoiceLogprobs",
+ "Output",
+ "OutputUnionMember0",
+ "OutputUnionMember1",
+ "OutputUnionMember2",
+ "OutputUnionMember2Content",
+ "Tool",
+]
+
+
+class UsageInputTokensDetails(BaseModel):
+ """A detailed breakdown of the input tokens."""
+
+ cached_tokens: int
+ """The number of tokens that were retrieved from the cache.
+
+ [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+ """
+
+
+class UsageOutputTokensDetails(BaseModel):
+ """A detailed breakdown of the output tokens."""
+
+ reasoning_tokens: int
+ """The number of reasoning tokens."""
+
+ tool_output_tokens: int
+ """The number of tool output tokens."""
+
+
+class Usage(BaseModel):
+ """
+ Detailed token usage statistics for the request, including input/output token counts and detailed breakdowns.
+ """
+
+ input_tokens: int
+ """The number of input tokens."""
+
+ input_tokens_details: UsageInputTokensDetails
+ """A detailed breakdown of the input tokens."""
+
+ output_tokens: int
+ """The number of output tokens."""
+
+ output_tokens_details: UsageOutputTokensDetails
+ """A detailed breakdown of the output tokens."""
+
+ total_tokens: int
+ """The total number of tokens used."""
+
+
+class ChoiceMessageToolCallFunction(BaseModel):
+ """The function that the model called."""
+
+ arguments: str
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: str
+ """The name of the function to call."""
+
+
+class ChoiceMessageToolCall(BaseModel):
+ id: str
+ """The ID of the tool call."""
+
+ function: ChoiceMessageToolCallFunction
+ """The function that the model called."""
+
+ type: Literal["function"]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceMessage(BaseModel):
+ """The generated message response."""
+
+ content: Optional[str] = None
+ """The generated text content."""
+
+ role: Optional[Literal["assistant"]] = None
+ """The role of the message author, which is always `assistant`."""
+
+ tool_calls: Optional[List[ChoiceMessageToolCall]] = None
+ """The tool calls generated by the model, such as function calls."""
+
+
+class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice.
+
+ Only present if logprobs was requested in the request.
+ """
+
+ content: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message content tokens with log probability information."""
+
+
+class Choice(BaseModel):
+ finish_reason: Literal["stop", "length", "tool_calls", "content_filter"]
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, `length` if the maximum number of tokens specified in the request was
+ reached, or `tool_calls` if the model called a tool.
+ """
+
+ index: int
+ """The index of the choice in the list of choices."""
+
+ message: ChoiceMessage
+ """The generated message response."""
+
+ logprobs: Optional[ChoiceLogprobs] = None
+ """Log probability information for the choice.
+
+ Only present if logprobs was requested in the request.
+ """
+
+
+class OutputUnionMember0(BaseModel):
+ arguments: str
+ """JSON string of function arguments"""
+
+ call_id: str
+ """The unique ID of the function tool call"""
+
+ name: str
+ """The name of the function to call"""
+
+ type: Literal["function_call"]
+ """The type of output item"""
+
+ id: Optional[str] = None
+ """The unique ID of the function tool call (same as call_id)"""
+
+ encrypted_content: Optional[str] = None
+ """Encrypted content (optional)"""
+
+ status: Optional[str] = None
+ """Status of the item (optional, can be null)"""
+
+
+class OutputUnionMember1(BaseModel):
+ text: str
+ """The text content"""
+
+ type: Literal["text"]
+ """The type of output item"""
+
+
+class OutputUnionMember2Content(BaseModel):
+ text: str
+ """The reasoning text content"""
+
+ type: Literal["reasoning_text"]
+ """The type of content"""
+
+
+class OutputUnionMember2(BaseModel):
+ id: str
+ """The unique ID of the reasoning item"""
+
+ content: List[OutputUnionMember2Content]
+ """Array of reasoning content parts"""
+
+ summary: List[object]
+ """Summary of the reasoning (usually empty)"""
+
+ type: Literal["reasoning"]
+ """The type of output item"""
+
+ encrypted_content: Optional[str] = None
+ """Encrypted content (optional)"""
+
+ status: Optional[str] = None
+ """Status of the item (optional, can be null)"""
+
+
+Output: TypeAlias = Union[OutputUnionMember0, OutputUnionMember1, OutputUnionMember2]
+
+
+class Tool(BaseModel):
+ """Tool definition for Responses API (flat format).
+
+ This format is used by VLLM's Responses API where name, description, and parameters are at the top level of the tool object.
+ """
+
+ type: Literal["function", "web_search", "web_search_2025_08_26"]
+ """The type of the tool.
+
+ Supported values are `function` (custom tools), `web_search`, and
+ `web_search_2025_08_26` (built-in web search).
+ """
+
+ description: Optional[str] = None
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ name: Optional[str] = None
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ parameters: Optional[Dict[str, object]] = None
+ """The parameters the functions accepts, described as a JSON Schema object.
+
+ See the [guide](/docs/guides/function-calling) for examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+
+class CreateResponseResponse(BaseModel):
+ """
+ Represents a text-to-text response returned by the model, based on the provided input. VLLM models only.
+ """
+
+ id: str
+ """A unique identifier for the response."""
+
+ created: int
+ """The Unix timestamp (in seconds) of when the response was created."""
+
+ model: str
+ """The model used to generate the response."""
+
+ object: Literal["response"]
+ """The object type, which is always `response`."""
+
+ usage: Usage
+ """
+ Detailed token usage statistics for the request, including input/output token
+ counts and detailed breakdowns.
+ """
+
+ background: Optional[bool] = None
+ """Whether the request was processed in the background"""
+
+ choices: Optional[List[Choice]] = None
+ """A list of response choices.
+
+ Can be more than one if `n` is greater than 1. Optional - Responses API
+ primarily uses the output array.
+ """
+
+ input_messages: Optional[List[builtins.object]] = None
+ """Input messages (if applicable)"""
+
+ max_output_tokens: Optional[int] = None
+ """Maximum output tokens setting"""
+
+ max_tool_calls: Optional[int] = None
+ """Maximum tool calls setting"""
+
+ output: Optional[List[Output]] = None
+ """An array of content items generated by the model.
+
+ This includes text content, function calls, reasoning items, and other output
+ types. Use this field for Responses API compatibility.
+ """
+
+ output_messages: Optional[List[builtins.object]] = None
+ """Output messages (if applicable)"""
+
+ parallel_tool_calls: Optional[bool] = None
+ """Whether parallel tool calls are enabled"""
+
+ previous_response_id: Optional[str] = None
+ """Previous response ID (for multi-turn conversations)"""
+
+ prompt: Optional[str] = None
+ """Prompt used for the response"""
+
+ reasoning: Optional[str] = None
+ """Reasoning content"""
+
+ service_tier: Optional[str] = None
+ """Service tier used"""
+
+ status: Optional[str] = None
+ """Status of the response"""
+
+ temperature: Optional[float] = None
+ """Temperature setting used for the response"""
+
+ text: Optional[str] = None
+ """Text content"""
+
+ tool_choice: Optional[str] = None
+ """Tool choice setting used for the response"""
+
+ tools: Optional[List[Tool]] = None
+ """Tools available for the response"""
+
+ top_logprobs: Optional[int] = None
+ """Top logprobs setting"""
+
+ top_p: Optional[float] = None
+ """Top-p setting used for the response"""
+
+ truncation: Optional[str] = None
+ """Truncation setting"""
+
+ user: Optional[str] = None
+ """User identifier"""
diff --git a/src/gradient/types/shared/create_response_stream_response.py b/src/gradient/types/shared/create_response_stream_response.py
new file mode 100644
index 00000000..ef0230c8
--- /dev/null
+++ b/src/gradient/types/shared/create_response_stream_response.py
@@ -0,0 +1,139 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .completion_usage import CompletionUsage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+ "CreateResponseStreamResponse",
+ "Choice",
+ "ChoiceDelta",
+ "ChoiceDeltaToolCall",
+ "ChoiceDeltaToolCallFunction",
+ "ChoiceLogprobs",
+]
+
+
+class ChoiceDeltaToolCallFunction(BaseModel):
+ """The function that the model called."""
+
+ arguments: str
+ """
+ The arguments to call the function with, as generated by the model in JSON
+ format. Note that the model does not always generate valid JSON, and may
+ hallucinate parameters not defined by your function schema. Validate the
+ arguments in your code before calling your function.
+ """
+
+ name: str
+ """The name of the function to call."""
+
+
+class ChoiceDeltaToolCall(BaseModel):
+ id: str
+ """The ID of the tool call."""
+
+ function: ChoiceDeltaToolCallFunction
+ """The function that the model called."""
+
+ type: Literal["function"]
+ """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceDelta(BaseModel):
+ """A chunk of the response message generated by the model."""
+
+ content: Optional[str] = None
+ """The contents of the chunk message.
+
+ Can be null for chunks with tool calls or other non-text content.
+ """
+
+ reasoning_content: Optional[str] = None
+ """The reasoning content generated by the model.
+
+ Only present when the model generates reasoning text.
+ """
+
+ role: Optional[Literal["assistant"]] = None
+ """The role of the message author. Only present in the first chunk."""
+
+ tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
+ """The tool calls generated by the model, such as function calls.
+
+ Only present when the model decides to call a tool.
+ """
+
+
+class ChoiceLogprobs(BaseModel):
+ """Log probability information for the choice.
+
+ Only present if logprobs was requested in the request.
+ """
+
+ content: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message content tokens with log probability information."""
+
+
+class Choice(BaseModel):
+ delta: ChoiceDelta
+ """A chunk of the response message generated by the model."""
+
+ index: int
+ """The index of the choice in the list of choices."""
+
+ finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter"]] = None
+ """The reason the model stopped generating tokens.
+
+ This will be `stop` if the model hit a natural stop point or a provided stop
+ sequence, `length` if the maximum number of tokens specified in the request was
+ reached, or `tool_calls` if the model called a tool. Only present in the final
+ chunk.
+ """
+
+ logprobs: Optional[ChoiceLogprobs] = None
+ """Log probability information for the choice.
+
+ Only present if logprobs was requested in the request.
+ """
+
+
+class CreateResponseStreamResponse(BaseModel):
+ """
+ Represents a streamed chunk of a text-to-text response returned by the model, based on the provided input. VLLM models only.
+ """
+
+ id: str
+ """A unique identifier for the response. Each chunk has the same ID."""
+
+ choices: List[Choice]
+ """A list of response choice chunks.
+
+ Can contain more than one element if `n` is greater than 1. Can also be empty
+ for the last chunk if you set `stream_options: {"include_usage": true}`.
+ """
+
+ created: int
+ """The Unix timestamp (in seconds) of when the response was created.
+
+ Each chunk has the same timestamp.
+ """
+
+ model: str
+ """The model used to generate the response."""
+
+ object: Literal["response.chunk"]
+ """The object type, which is always `response.chunk`."""
+
+ usage: Optional[CompletionUsage] = None
+ """
+ An optional field that will only be present when you set
+ `stream_options: {"include_usage": true}` in your request. When present, it
+ contains a null value **except for the last chunk** which contains the token
+ usage statistics for the entire request. **NOTE:** If the stream is interrupted
+ or cancelled, you may not receive the final usage chunk which contains the total
+ token usage for the request.
+ """
diff --git a/src/gradient/types/shared/disk_info.py b/src/gradient/types/shared/disk_info.py
new file mode 100644
index 00000000..3c5c4911
--- /dev/null
+++ b/src/gradient/types/shared/disk_info.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["DiskInfo", "Size"]
+
+
+class Size(BaseModel):
+ amount: Optional[int] = None
+ """The amount of space allocated to the disk."""
+
+ unit: Optional[str] = None
+ """The unit of measure for the disk size."""
+
+
+class DiskInfo(BaseModel):
+ size: Optional[Size] = None
+
+ type: Optional[Literal["local", "scratch"]] = None
+ """The type of disk.
+
+ All Droplets contain a `local` disk. Additionally, GPU Droplets can also have a
+ `scratch` disk for non-persistent data.
+ """
diff --git a/src/gradient/types/shared/droplet.py b/src/gradient/types/shared/droplet.py
new file mode 100644
index 00000000..4ae5bae4
--- /dev/null
+++ b/src/gradient/types/shared/droplet.py
@@ -0,0 +1,148 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .size import Size
+from .image import Image
+from .kernel import Kernel
+from .region import Region
+from .gpu_info import GPUInfo
+from ..._models import BaseModel
+from .disk_info import DiskInfo
+from .network_v4 import NetworkV4
+from .network_v6 import NetworkV6
+from .droplet_next_backup_window import DropletNextBackupWindow
+
+__all__ = ["Droplet", "Networks"]
+
+
+class Networks(BaseModel):
+ """The details of the network that are configured for the Droplet instance.
+
+ This is an object that contains keys for IPv4 and IPv6. The value of each of these is an array that contains objects describing an individual IP resource allocated to the Droplet. These will define attributes like the IP address, netmask, and gateway of the specific network depending on the type of network it is.
+ """
+
+ v4: Optional[List[NetworkV4]] = None
+
+ v6: Optional[List[NetworkV6]] = None
+
+
+class Droplet(BaseModel):
+ id: int
+ """A unique identifier for each Droplet instance.
+
+ This is automatically generated upon Droplet creation.
+ """
+
+ backup_ids: List[int]
+ """
+ An array of backup IDs of any backups that have been taken of the Droplet
+ instance. Droplet backups are enabled at the time of the instance creation.
+ Requires `image:read` scope.
+ """
+
+ created_at: datetime
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the Droplet was created.
+ """
+
+ disk: int
+ """The size of the Droplet's disk in gigabytes."""
+
+ features: List[str]
+ """An array of features enabled on this Droplet."""
+
+ image: Image
+ """The Droplet's image. Requires `image:read` scope."""
+
+ locked: bool
+ """
+ A boolean value indicating whether the Droplet has been locked, preventing
+ actions by users.
+ """
+
+ memory: int
+ """Memory of the Droplet in megabytes."""
+
+ name: str
+ """The human-readable name set for the Droplet instance."""
+
+ networks: Networks
+ """The details of the network that are configured for the Droplet instance.
+
+ This is an object that contains keys for IPv4 and IPv6. The value of each of
+ these is an array that contains objects describing an individual IP resource
+ allocated to the Droplet. These will define attributes like the IP address,
+ netmask, and gateway of the specific network depending on the type of network it
+ is.
+ """
+
+ next_backup_window: Optional[DropletNextBackupWindow] = None
+ """
+ The details of the Droplet's backups feature, if backups are configured for the
+ Droplet. This object contains keys for the start and end times of the window
+ during which the backup will start.
+ """
+
+ region: Region
+
+ size: Size
+
+ size_slug: str
+ """The unique slug identifier for the size of this Droplet."""
+
+ snapshot_ids: List[int]
+ """
+ An array of snapshot IDs of any snapshots created from the Droplet instance.
+ Requires `image:read` scope.
+ """
+
+ status: Literal["new", "active", "off", "archive"]
+ """A status string indicating the state of the Droplet instance.
+
+ This may be "new", "active", "off", or "archive".
+ """
+
+ tags: List[str]
+ """An array of Tags the Droplet has been tagged with. Requires `tag:read` scope."""
+
+ vcpus: int
+ """The number of virtual CPUs."""
+
+ volume_ids: List[str]
+ """
+ A flat array including the unique identifier for each Block Storage volume
+ attached to the Droplet. Requires `block_storage:read` scope.
+ """
+
+ disk_info: Optional[List[DiskInfo]] = None
+ """
+ An array of objects containing information about the disks available to the
+ Droplet.
+ """
+
+ gpu_info: Optional[GPUInfo] = None
+ """
+ An object containing information about the GPU capabilities of Droplets created
+ with this size.
+ """
+
+ kernel: Optional[Kernel] = None
+ """
+ **Note**: All Droplets created after March 2017 use internal kernels by default.
+ These Droplets will have this attribute set to `null`.
+
+ The current
+ [kernel](https://docs.digitalocean.com/products/droplets/how-to/kernel/) for
+ Droplets with externally managed kernels. This will initially be set to the
+ kernel of the base image when the Droplet is created.
+ """
+
+ vpc_uuid: Optional[str] = None
+ """
+ A string specifying the UUID of the VPC to which the Droplet is assigned.
+ Requires `vpc:read` scope.
+ """
diff --git a/src/gradient/types/shared/droplet_next_backup_window.py b/src/gradient/types/shared/droplet_next_backup_window.py
new file mode 100644
index 00000000..81d07be6
--- /dev/null
+++ b/src/gradient/types/shared/droplet_next_backup_window.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["DropletNextBackupWindow"]
+
+
+class DropletNextBackupWindow(BaseModel):
+ end: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format specifying the end
+ of the Droplet's backup window.
+ """
+
+ start: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format specifying the start
+ of the Droplet's backup window.
+ """
diff --git a/src/gradient/types/shared/firewall_rule_target.py b/src/gradient/types/shared/firewall_rule_target.py
new file mode 100644
index 00000000..11f61065
--- /dev/null
+++ b/src/gradient/types/shared/firewall_rule_target.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["FirewallRuleTarget"]
+
+
+class FirewallRuleTarget(BaseModel):
+ addresses: Optional[List[str]] = None
+ """
+ An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs,
+ and/or IPv6 CIDRs to which the firewall will allow traffic.
+ """
+
+ droplet_ids: Optional[List[int]] = None
+ """
+ An array containing the IDs of the Droplets to which the firewall will allow
+ traffic.
+ """
+
+ kubernetes_ids: Optional[List[str]] = None
+ """
+ An array containing the IDs of the Kubernetes clusters to which the firewall
+ will allow traffic.
+ """
+
+ load_balancer_uids: Optional[List[str]] = None
+ """
+ An array containing the IDs of the load balancers to which the firewall will
+ allow traffic.
+ """
+
+ tags: Optional[List[str]] = None
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+ """
diff --git a/src/gradient/types/shared/forward_links.py b/src/gradient/types/shared/forward_links.py
new file mode 100644
index 00000000..30d46985
--- /dev/null
+++ b/src/gradient/types/shared/forward_links.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ForwardLinks"]
+
+
+class ForwardLinks(BaseModel):
+ last: Optional[str] = None
+ """URI of the last page of the results."""
+
+ next: Optional[str] = None
+ """URI of the next page of the results."""
diff --git a/src/gradient/types/shared/garbage_collection.py b/src/gradient/types/shared/garbage_collection.py
new file mode 100644
index 00000000..f1f7f4cd
--- /dev/null
+++ b/src/gradient/types/shared/garbage_collection.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["GarbageCollection"]
+
+
+class GarbageCollection(BaseModel):
+ blobs_deleted: Optional[int] = None
+ """The number of blobs deleted as a result of this garbage collection."""
+
+ created_at: Optional[datetime] = None
+ """The time the garbage collection was created."""
+
+ freed_bytes: Optional[int] = None
+ """The number of bytes freed as a result of this garbage collection."""
+
+ registry_name: Optional[str] = None
+ """The name of the container registry."""
+
+ status: Optional[
+ Literal[
+ "requested",
+ "waiting for write JWTs to expire",
+ "scanning manifests",
+ "deleting unreferenced blobs",
+ "cancelling",
+ "failed",
+ "succeeded",
+ "cancelled",
+ ]
+ ] = None
+ """The current status of this garbage collection."""
+
+ updated_at: Optional[datetime] = None
+ """The time the garbage collection was last updated."""
+
+ uuid: Optional[str] = None
+ """A string specifying the UUID of the garbage collection."""
diff --git a/src/gradient/types/shared/gpu_info.py b/src/gradient/types/shared/gpu_info.py
new file mode 100644
index 00000000..7f9d7329
--- /dev/null
+++ b/src/gradient/types/shared/gpu_info.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["GPUInfo", "Vram"]
+
+
+class Vram(BaseModel):
+ amount: Optional[int] = None
+ """The amount of VRAM allocated to the GPU."""
+
+ unit: Optional[str] = None
+ """The unit of measure for the VRAM."""
+
+
+class GPUInfo(BaseModel):
+ """
+ An object containing information about the GPU capabilities of Droplets created with this size.
+ """
+
+ count: Optional[int] = None
+ """The number of GPUs allocated to the Droplet."""
+
+ model: Optional[str] = None
+ """The model of the GPU."""
+
+ vram: Optional[Vram] = None
diff --git a/src/gradient/types/shared/image.py b/src/gradient/types/shared/image.py
new file mode 100644
index 00000000..d8a7acde
--- /dev/null
+++ b/src/gradient/types/shared/image.py
@@ -0,0 +1,131 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["Image"]
+
+
+class Image(BaseModel):
+ id: Optional[int] = None
+ """A unique number that can be used to identify and reference a specific image."""
+
+ created_at: Optional[datetime] = None
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the image was created.
+ """
+
+ description: Optional[str] = None
+ """An optional free-form text field to describe an image."""
+
+ distribution: Optional[
+ Literal[
+ "Arch Linux",
+ "CentOS",
+ "CoreOS",
+ "Debian",
+ "Fedora",
+ "Fedora Atomic",
+ "FreeBSD",
+ "Gentoo",
+ "openSUSE",
+ "RancherOS",
+ "Rocky Linux",
+ "Ubuntu",
+ "Unknown",
+ ]
+ ] = None
+ """The name of a custom image's distribution.
+
+ Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`,
+ `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`,
+ `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but
+ ignored, and `Unknown` will be used in its place.
+ """
+
+ error_message: Optional[str] = None
+ """
+ A string containing information about errors that may occur when importing a
+ custom image.
+ """
+
+ min_disk_size: Optional[int] = None
+ """The minimum disk size in GB required for a Droplet to use this image."""
+
+ name: Optional[str] = None
+ """The display name that has been given to an image.
+
+ This is what is shown in the control panel and is generally a descriptive title
+ for the image in question.
+ """
+
+ public: Optional[bool] = None
+ """
+ This is a boolean value that indicates whether the image in question is public
+ or not. An image that is public is available to all accounts. A non-public image
+ is only accessible from your account.
+ """
+
+ regions: Optional[
+ List[
+ Literal[
+ "ams1",
+ "ams2",
+ "ams3",
+ "blr1",
+ "fra1",
+ "lon1",
+ "nyc1",
+ "nyc2",
+ "nyc3",
+ "sfo1",
+ "sfo2",
+ "sfo3",
+ "sgp1",
+ "tor1",
+ "syd1",
+ ]
+ ]
+ ] = None
+ """This attribute is an array of the regions that the image is available in.
+
+ The regions are represented by their identifying slug values.
+ """
+
+ size_gigabytes: Optional[float] = None
+ """The size of the image in gigabytes."""
+
+ slug: Optional[str] = None
+ """
+ A uniquely identifying string that is associated with each of the
+ DigitalOcean-provided public images. These can be used to reference a public
+ image as an alternative to the numeric id.
+ """
+
+ status: Optional[Literal["NEW", "available", "pending", "deleted", "retired"]] = None
+ """A status string indicating the state of a custom image.
+
+ This may be `NEW`, `available`, `pending`, `deleted`, or `retired`.
+ """
+
+ tags: Optional[List[str]] = None
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names may be for either existing or new tags.
+
+ Requires `tag:create` scope.
+ """
+
+ type: Optional[Literal["base", "snapshot", "backup", "custom", "admin"]] = None
+ """Describes the kind of image.
+
+ It may be one of `base`, `snapshot`, `backup`, `custom`, or `admin`.
+ Respectively, this specifies whether an image is a DigitalOcean base OS image,
+ user-generated Droplet snapshot, automatically created Droplet backup,
+ user-provided virtual machine image, or an image used for DigitalOcean managed
+ resources (e.g. DOKS worker nodes).
+ """
diff --git a/src/gradient/types/shared/image_gen_completed_event.py b/src/gradient/types/shared/image_gen_completed_event.py
new file mode 100644
index 00000000..de44188f
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_completed_event.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"]
+
+
+class UsageInputTokensDetails(BaseModel):
+ """The input tokens detailed information for the image generation."""
+
+ image_tokens: int
+ """The number of image tokens in the input prompt."""
+
+ text_tokens: int
+ """The number of text tokens in the input prompt."""
+
+
+class Usage(BaseModel):
+ """For `gpt-image-1` only, the token usage information for the image generation."""
+
+ input_tokens: int
+ """The number of tokens (images and text) in the input prompt."""
+
+ input_tokens_details: UsageInputTokensDetails
+ """The input tokens detailed information for the image generation."""
+
+ output_tokens: int
+ """The number of image tokens in the output image."""
+
+ total_tokens: int
+ """The total number of tokens (images and text) used for the image generation."""
+
+
+class ImageGenCompletedEvent(BaseModel):
+ """Emitted when image generation has completed and the final image is available."""
+
+ b64_json: str
+ """Base64-encoded image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the generated image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the generated image."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the generated image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the generated image."""
+
+ type: Literal["image_generation.completed"]
+ """The type of the event. Always `image_generation.completed`."""
+
+ usage: Usage
+ """For `gpt-image-1` only, the token usage information for the image generation."""
diff --git a/src/gradient/types/shared/image_gen_partial_image_event.py b/src/gradient/types/shared/image_gen_partial_image_event.py
new file mode 100644
index 00000000..e2740e08
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_partial_image_event.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ImageGenPartialImageEvent"]
+
+
+class ImageGenPartialImageEvent(BaseModel):
+ """Emitted when a partial image is available during image generation streaming."""
+
+ b64_json: str
+ """Base64-encoded partial image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the requested image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the requested image."""
+
+ partial_image_index: int
+ """0-based index for the partial image (streaming)."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the requested image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the requested image."""
+
+ type: Literal["image_generation.partial_image"]
+ """The type of the event. Always `image_generation.partial_image`."""
diff --git a/src/gradient/types/shared/image_gen_stream_event.py b/src/gradient/types/shared/image_gen_stream_event.py
new file mode 100644
index 00000000..30e9571e
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_stream_event.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .image_gen_completed_event import ImageGenCompletedEvent
+from .image_gen_partial_image_event import ImageGenPartialImageEvent
+
+__all__ = ["ImageGenStreamEvent"]
+
+ImageGenStreamEvent: TypeAlias = Annotated[
+ Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type")
+]
diff --git a/src/gradient/types/shared/kernel.py b/src/gradient/types/shared/kernel.py
new file mode 100644
index 00000000..79091d33
--- /dev/null
+++ b/src/gradient/types/shared/kernel.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["Kernel"]
+
+
+class Kernel(BaseModel):
+ """
+ **Note**: All Droplets created after March 2017 use internal kernels by default.
+ These Droplets will have this attribute set to `null`.
+
+ The current [kernel](https://docs.digitalocean.com/products/droplets/how-to/kernel/)
+ for Droplets with externally managed kernels. This will initially be set to
+ the kernel of the base image when the Droplet is created.
+ """
+
+ id: Optional[int] = None
+ """A unique number used to identify and reference a specific kernel."""
+
+ name: Optional[str] = None
+ """The display name of the kernel.
+
+ This is shown in the web UI and is generally a descriptive title for the kernel
+ in question.
+ """
+
+ version: Optional[str] = None
+ """
+ A standard kernel version string representing the version, patch, and release
+ information.
+ """
diff --git a/src/gradient/types/shared/meta_properties.py b/src/gradient/types/shared/meta_properties.py
new file mode 100644
index 00000000..b7d703df
--- /dev/null
+++ b/src/gradient/types/shared/meta_properties.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["MetaProperties"]
+
+
+class MetaProperties(BaseModel):
+ """Information about the response itself."""
+
+ total: Optional[int] = None
+ """Number of objects returned by the request."""
diff --git a/src/gradient/types/shared/network_v4.py b/src/gradient/types/shared/network_v4.py
new file mode 100644
index 00000000..bbf8490a
--- /dev/null
+++ b/src/gradient/types/shared/network_v4.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["NetworkV4"]
+
+
+class NetworkV4(BaseModel):
+ gateway: Optional[str] = None
+ """The gateway of the specified IPv4 network interface.
+
+ For private interfaces, a gateway is not provided. This is denoted by returning
+ `nil` as its value.
+ """
+
+ ip_address: Optional[str] = None
+ """The IP address of the IPv4 network interface."""
+
+ netmask: Optional[str] = None
+ """The netmask of the IPv4 network interface."""
+
+ type: Optional[Literal["public", "private"]] = None
+ """The type of the IPv4 network interface."""
diff --git a/src/gradient/types/shared/network_v6.py b/src/gradient/types/shared/network_v6.py
new file mode 100644
index 00000000..a3eb6b42
--- /dev/null
+++ b/src/gradient/types/shared/network_v6.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["NetworkV6"]
+
+
+class NetworkV6(BaseModel):
+ gateway: Optional[str] = None
+ """The gateway of the specified IPv6 network interface."""
+
+ ip_address: Optional[str] = None
+ """The IP address of the IPv6 network interface."""
+
+ netmask: Optional[int] = None
+ """The netmask of the IPv6 network interface."""
+
+ type: Optional[Literal["public"]] = None
+ """The type of the IPv6 network interface.
+
+ **Note**: IPv6 private networking is not currently supported.
+ """
diff --git a/src/gradient/types/shared/page_links.py b/src/gradient/types/shared/page_links.py
new file mode 100644
index 00000000..bfceabef
--- /dev/null
+++ b/src/gradient/types/shared/page_links.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import TypeAlias
+
+from ..._models import BaseModel
+from .forward_links import ForwardLinks
+from .backward_links import BackwardLinks
+
+__all__ = ["PageLinks", "Pages"]
+
+Pages: TypeAlias = Union[ForwardLinks, BackwardLinks, object]
+
+
+class PageLinks(BaseModel):
+ pages: Optional[Pages] = None
diff --git a/src/gradient/types/shared/region.py b/src/gradient/types/shared/region.py
new file mode 100644
index 00000000..d2fe7c51
--- /dev/null
+++ b/src/gradient/types/shared/region.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["Region"]
+
+
+class Region(BaseModel):
+ available: bool
+ """
+ This is a boolean value that represents whether new Droplets can be created in
+ this region.
+ """
+
+ features: List[str]
+ """
+ This attribute is set to an array which contains features available in this
+ region
+ """
+
+ name: str
+ """The display name of the region.
+
+ This will be a full name that is used in the control panel and other interfaces.
+ """
+
+ sizes: List[str]
+ """
+ This attribute is set to an array which contains the identifying slugs for the
+ sizes available in this region. sizes:read is required to view.
+ """
+
+ slug: str
+ """A human-readable string that is used as a unique identifier for each region."""
diff --git a/src/gradient/types/shared/size.py b/src/gradient/types/shared/size.py
new file mode 100644
index 00000000..73abb7dd
--- /dev/null
+++ b/src/gradient/types/shared/size.py
@@ -0,0 +1,79 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .gpu_info import GPUInfo
+from ..._models import BaseModel
+from .disk_info import DiskInfo
+
+__all__ = ["Size"]
+
+
+class Size(BaseModel):
+ available: bool
+ """
+ This is a boolean value that represents whether new Droplets can be created with
+ this size.
+ """
+
+ description: str
+ """A string describing the class of Droplets created from this size.
+
+ For example: Basic, General Purpose, CPU-Optimized, Memory-Optimized, or
+ Storage-Optimized.
+ """
+
+ disk: int
+ """The amount of disk space set aside for Droplets of this size.
+
+ The value is represented in gigabytes.
+ """
+
+ memory: int
+ """The amount of RAM allocated to Droplets created of this size.
+
+ The value is represented in megabytes.
+ """
+
+ price_hourly: float
+ """This describes the price of the Droplet size as measured hourly.
+
+ The value is measured in US dollars.
+ """
+
+ price_monthly: float
+ """
+ This attribute describes the monthly cost of this Droplet size if the Droplet is
+ kept for an entire month. The value is measured in US dollars.
+ """
+
+ regions: List[str]
+ """
+ An array containing the region slugs where this size is available for Droplet
+ creates.
+ """
+
+ slug: str
+ """A human-readable string that is used to uniquely identify each size."""
+
+ transfer: float
+ """
+ The amount of transfer bandwidth that is available for Droplets created in this
+ size. This only counts traffic on the public interface. The value is given in
+ terabytes.
+ """
+
+ vcpus: int
+ """The number of CPUs allocated to Droplets of this size."""
+
+ disk_info: Optional[List[DiskInfo]] = None
+ """
+ An array of objects containing information about the disks available to Droplets
+ created with this size.
+ """
+
+ gpu_info: Optional[GPUInfo] = None
+ """
+ An object containing information about the GPU capabilities of Droplets created
+ with this size.
+ """
diff --git a/src/gradient/types/shared/snapshots.py b/src/gradient/types/shared/snapshots.py
new file mode 100644
index 00000000..940b58c8
--- /dev/null
+++ b/src/gradient/types/shared/snapshots.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["Snapshots"]
+
+
+class Snapshots(BaseModel):
+ id: str
+ """The unique identifier for the snapshot."""
+
+ created_at: datetime
+ """
+ A time value given in ISO8601 combined date and time format that represents when
+ the snapshot was created.
+ """
+
+ min_disk_size: int
+ """The minimum size in GB required for a volume or Droplet to use this snapshot."""
+
+ name: str
+ """A human-readable name for the snapshot."""
+
+ regions: List[str]
+ """An array of the regions that the snapshot is available in.
+
+ The regions are represented by their identifying slug values.
+ """
+
+ resource_id: str
+ """The unique identifier for the resource that the snapshot originated from."""
+
+ resource_type: Literal["droplet", "volume"]
+ """The type of resource that the snapshot originated from."""
+
+ size_gigabytes: float
+ """The billable size of the snapshot in gigabytes."""
+
+ tags: Optional[List[str]] = None
+ """An array of Tags the snapshot has been tagged with.
+
+ Requires `tag:read` scope.
+ """
diff --git a/src/gradient/types/shared/subscription.py b/src/gradient/types/shared/subscription.py
new file mode 100644
index 00000000..4d77a9b8
--- /dev/null
+++ b/src/gradient/types/shared/subscription.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+from .subscription_tier_base import SubscriptionTierBase
+
+__all__ = ["Subscription"]
+
+
+class Subscription(BaseModel):
+ created_at: Optional[datetime] = None
+ """The time at which the subscription was created."""
+
+ tier: Optional[SubscriptionTierBase] = None
+
+ updated_at: Optional[datetime] = None
+ """The time at which the subscription was last updated."""
diff --git a/src/gradient/types/shared/subscription_tier_base.py b/src/gradient/types/shared/subscription_tier_base.py
new file mode 100644
index 00000000..65e1a316
--- /dev/null
+++ b/src/gradient/types/shared/subscription_tier_base.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["SubscriptionTierBase"]
+
+
+class SubscriptionTierBase(BaseModel):
+ allow_storage_overage: Optional[bool] = None
+ """
+ A boolean indicating whether the subscription tier supports additional storage
+ above what is included in the base plan at an additional cost per GiB used.
+ """
+
+ included_bandwidth_bytes: Optional[int] = None
+ """
+ The amount of outbound data transfer included in the subscription tier in bytes.
+ """
+
+ included_repositories: Optional[int] = None
+ """The number of repositories included in the subscription tier.
+
+ `0` indicates that the subscription tier includes unlimited repositories.
+ """
+
+ included_storage_bytes: Optional[int] = None
+ """The amount of storage included in the subscription tier in bytes."""
+
+ monthly_price_in_cents: Optional[int] = None
+ """The monthly cost of the subscription tier in cents."""
+
+ name: Optional[str] = None
+ """The name of the subscription tier."""
+
+ slug: Optional[str] = None
+ """The slug identifier of the subscription tier."""
+
+ storage_overage_price_in_cents: Optional[int] = None
+ """
+ The price paid in cents per GiB for additional storage beyond what is included
+ in the subscription plan.
+ """
diff --git a/src/gradient/types/shared/vpc_peering.py b/src/gradient/types/shared/vpc_peering.py
new file mode 100644
index 00000000..ef674e23
--- /dev/null
+++ b/src/gradient/types/shared/vpc_peering.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["VpcPeering"]
+
+
+class VpcPeering(BaseModel):
+ id: Optional[str] = None
+ """A unique ID that can be used to identify and reference the VPC peering."""
+
+ created_at: Optional[datetime] = None
+ """A time value given in ISO8601 combined date and time format."""
+
+ name: Optional[str] = None
+ """The name of the VPC peering.
+
+ Must be unique within the team and may only contain alphanumeric characters and
+ dashes.
+ """
+
+ status: Optional[Literal["PROVISIONING", "ACTIVE", "DELETING"]] = None
+ """The current status of the VPC peering."""
+
+ vpc_ids: Optional[List[str]] = None
+ """An array of the two peered VPCs IDs."""
diff --git a/src/gradient/types/shared_params/__init__.py b/src/gradient/types/shared_params/__init__.py
new file mode 100644
index 00000000..ccdec8fd
--- /dev/null
+++ b/src/gradient/types/shared_params/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget
diff --git a/src/gradient/types/shared_params/firewall_rule_target.py b/src/gradient/types/shared_params/firewall_rule_target.py
new file mode 100644
index 00000000..7f317f6c
--- /dev/null
+++ b/src/gradient/types/shared_params/firewall_rule_target.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["FirewallRuleTarget"]
+
+
+class FirewallRuleTarget(TypedDict, total=False):
+ addresses: SequenceNotStr[str]
+ """
+ An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs,
+ and/or IPv6 CIDRs to which the firewall will allow traffic.
+ """
+
+ droplet_ids: Iterable[int]
+ """
+ An array containing the IDs of the Droplets to which the firewall will allow
+ traffic.
+ """
+
+ kubernetes_ids: SequenceNotStr[str]
+ """
+ An array containing the IDs of the Kubernetes clusters to which the firewall
+ will allow traffic.
+ """
+
+ load_balancer_uids: SequenceNotStr[str]
+ """
+ An array containing the IDs of the load balancers to which the firewall will
+ allow traffic.
+ """
+
+ tags: Optional[SequenceNotStr[str]]
+ """A flat array of tag names as strings to be applied to the resource.
+
+ Tag names must exist in order to be referenced in a request.
+
+ Requires `tag:create` and `tag:read` scopes.
+ """
diff --git a/tests/api_resources/fine_tuning/__init__.py b/tests/api_resources/agents/__init__.py
similarity index 100%
rename from tests/api_resources/fine_tuning/__init__.py
rename to tests/api_resources/agents/__init__.py
diff --git a/tests/api_resources/fine_tuning/checkpoints/__init__.py b/tests/api_resources/agents/chat/__init__.py
similarity index 100%
rename from tests/api_resources/fine_tuning/checkpoints/__init__.py
rename to tests/api_resources/agents/chat/__init__.py
diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py
new file mode 100644
index 00000000..7574766e
--- /dev/null
+++ b/tests/api_resources/agents/chat/test_completions.py
@@ -0,0 +1,403 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.chat import CompletionCreateResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestCompletions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_1(self, client: Gradient) -> None:
+ completion = client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
+ completion = client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ frequency_penalty=-2,
+ logit_bias={"foo": 0},
+ logprobs=True,
+ max_completion_tokens=256,
+ max_tokens=0,
+ metadata={"foo": "string"},
+ n=1,
+ presence_penalty=-2,
+ reasoning_effort="none",
+ stop="\n",
+ stream=False,
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "function": {
+ "name": "name",
+ "description": "description",
+ "parameters": {"foo": "bar"},
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=0,
+ top_p=1,
+ user="user-1234",
+ )
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+ response = client.agents.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ completion = response.parse()
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+ with client.agents.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ completion = response.parse()
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ completion_stream = client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ )
+ completion_stream.response.close()
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ completion_stream = client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ frequency_penalty=-2,
+ logit_bias={"foo": 0},
+ logprobs=True,
+ max_completion_tokens=256,
+ max_tokens=0,
+ metadata={"foo": "string"},
+ n=1,
+ presence_penalty=-2,
+ reasoning_effort="none",
+ stop="\n",
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "function": {
+ "name": "name",
+ "description": "description",
+ "parameters": {"foo": "bar"},
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=0,
+ top_p=1,
+ user="user-1234",
+ )
+ completion_stream.response.close()
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.agents.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.agents.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncCompletions:
+ parametrize = pytest.mark.parametrize(
+ "async_client",
+ [False, True, {"http_client": "aiohttp"}],
+ indirect=True,
+ ids=["loose", "strict", "aiohttp"],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
+ completion = await async_client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ completion = await async_client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ frequency_penalty=-2,
+ logit_bias={"foo": 0},
+ logprobs=True,
+ max_completion_tokens=256,
+ max_tokens=0,
+ metadata={"foo": "string"},
+ n=1,
+ presence_penalty=-2,
+ reasoning_effort="none",
+ stop="\n",
+ stream=False,
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "function": {
+ "name": "name",
+ "description": "description",
+ "parameters": {"foo": "bar"},
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=0,
+ top_p=1,
+ user="user-1234",
+ )
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ completion = await response.parse()
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ completion = await response.parse()
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ completion_stream = await async_client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ )
+ await completion_stream.response.aclose()
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ completion_stream = await async_client.agents.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ frequency_penalty=-2,
+ logit_bias={"foo": 0},
+ logprobs=True,
+ max_completion_tokens=256,
+ max_tokens=0,
+ metadata={"foo": "string"},
+ n=1,
+ presence_penalty=-2,
+ reasoning_effort="none",
+ stop="\n",
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "function": {
+ "name": "name",
+ "description": "description",
+ "parameters": {"foo": "bar"},
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=0,
+ top_p=1,
+ user="user-1234",
+ )
+ await completion_stream.response.aclose()
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = await response.parse()
+ await stream.close()
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/fine_tuning/jobs/__init__.py b/tests/api_resources/agents/evaluation_metrics/__init__.py
similarity index 100%
rename from tests/api_resources/fine_tuning/jobs/__init__.py
rename to tests/api_resources/agents/evaluation_metrics/__init__.py
diff --git a/tests/api_resources/organization/__init__.py b/tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py
similarity index 100%
rename from tests/api_resources/organization/__init__.py
rename to tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py
diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py
new file mode 100644
index 00000000..3cb43489
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics.anthropic import (
+ KeyListResponse,
+ KeyCreateResponse,
+ KeyDeleteResponse,
+ KeyUpdateResponse,
+ KeyRetrieveResponse,
+ KeyListAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_agents(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_agents_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_agents(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_agents(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_agents(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid="",
+ )
+
+
+class TestAsyncKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_agents(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/organization/projects/__init__.py b/tests/api_resources/agents/evaluation_metrics/oauth2/__init__.py
similarity index 100%
rename from tests/api_resources/organization/projects/__init__.py
rename to tests/api_resources/agents/evaluation_metrics/oauth2/__init__.py
diff --git a/tests/api_resources/agents/evaluation_metrics/oauth2/test_dropbox.py b/tests/api_resources/agents/evaluation_metrics/oauth2/test_dropbox.py
new file mode 100644
index 00000000..64aea805
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/oauth2/test_dropbox.py
@@ -0,0 +1,100 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics.oauth2 import DropboxCreateTokensResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestDropbox:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_tokens(self, client: Gradient) -> None:
+ dropbox = client.agents.evaluation_metrics.oauth2.dropbox.create_tokens()
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_tokens_with_all_params(self, client: Gradient) -> None:
+ dropbox = client.agents.evaluation_metrics.oauth2.dropbox.create_tokens(
+ code="example string",
+ redirect_url="example string",
+ )
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_tokens(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.oauth2.dropbox.with_raw_response.create_tokens()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dropbox = response.parse()
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_tokens(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.oauth2.dropbox.with_streaming_response.create_tokens() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dropbox = response.parse()
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncDropbox:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_tokens(self, async_client: AsyncGradient) -> None:
+ dropbox = await async_client.agents.evaluation_metrics.oauth2.dropbox.create_tokens()
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_tokens_with_all_params(self, async_client: AsyncGradient) -> None:
+ dropbox = await async_client.agents.evaluation_metrics.oauth2.dropbox.create_tokens(
+ code="example string",
+ redirect_url="example string",
+ )
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_tokens(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.oauth2.dropbox.with_raw_response.create_tokens()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dropbox = await response.parse()
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_tokens(self, async_client: AsyncGradient) -> None:
+ async with (
+ async_client.agents.evaluation_metrics.oauth2.dropbox.with_streaming_response.create_tokens()
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dropbox = await response.parse()
+ assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/threads/__init__.py b/tests/api_resources/agents/evaluation_metrics/openai/__init__.py
similarity index 100%
rename from tests/api_resources/threads/__init__.py
rename to tests/api_resources/agents/evaluation_metrics/openai/__init__.py
diff --git a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py
new file mode 100644
index 00000000..475c52f8
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics.openai import (
+ KeyListResponse,
+ KeyCreateResponse,
+ KeyDeleteResponse,
+ KeyUpdateResponse,
+ KeyRetrieveResponse,
+ KeyListAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_agents(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_agents_with_all_params(self, client: Gradient) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_agents(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_agents(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_agents(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid="",
+ )
+
+
+class TestAsyncKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_agents(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/agents/evaluation_metrics/test_oauth2.py b/tests/api_resources/agents/evaluation_metrics/test_oauth2.py
new file mode 100644
index 00000000..10137439
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/test_oauth2.py
@@ -0,0 +1,98 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics import Oauth2GenerateURLResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestOauth2:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_generate_url(self, client: Gradient) -> None:
+ oauth2 = client.agents.evaluation_metrics.oauth2.generate_url()
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_generate_url_with_all_params(self, client: Gradient) -> None:
+ oauth2 = client.agents.evaluation_metrics.oauth2.generate_url(
+ redirect_url="redirect_url",
+ type="type",
+ )
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_generate_url(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.oauth2.with_raw_response.generate_url()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ oauth2 = response.parse()
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_generate_url(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.oauth2.with_streaming_response.generate_url() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ oauth2 = response.parse()
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncOauth2:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_generate_url(self, async_client: AsyncGradient) -> None:
+ oauth2 = await async_client.agents.evaluation_metrics.oauth2.generate_url()
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_generate_url_with_all_params(self, async_client: AsyncGradient) -> None:
+ oauth2 = await async_client.agents.evaluation_metrics.oauth2.generate_url(
+ redirect_url="redirect_url",
+ type="type",
+ )
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_generate_url(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.oauth2.with_raw_response.generate_url()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ oauth2 = await response.parse()
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_generate_url(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.oauth2.with_streaming_response.generate_url() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ oauth2 = await response.parse()
+ assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py b/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py
new file mode 100644
index 00000000..788b758a
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py
@@ -0,0 +1,274 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics import (
+ ScheduledIndexingCreateResponse,
+ ScheduledIndexingDeleteResponse,
+ ScheduledIndexingRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestScheduledIndexing:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.create()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.create(
+ days=[123],
+ knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ time="example string",
+ )
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncScheduledIndexing:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.create()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.create(
+ days=[123],
+ knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ time="example string",
+ )
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with (
+ async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.create()
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
new file mode 100644
index 00000000..3493f322
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
@@ -0,0 +1,521 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics import (
+ WorkspaceListResponse,
+ WorkspaceCreateResponse,
+ WorkspaceDeleteResponse,
+ WorkspaceUpdateResponse,
+ WorkspaceRetrieveResponse,
+ WorkspaceListEvaluationTestCasesResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestWorkspaces:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.create()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.create(
+ agent_uuids=["example string"],
+ description="example string",
+ name="example name",
+ )
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ description="example string",
+ name="example name",
+ body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.list()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_evaluation_test_cases(self, client: Gradient) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_evaluation_test_cases(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_evaluation_test_cases(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_evaluation_test_cases(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ "",
+ )
+
+
+class TestAsyncWorkspaces:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.create()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.create(
+ agent_uuids=["example string"],
+ description="example string",
+ name="example name",
+ )
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ description="example string",
+ name="example name",
+ body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.list()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ "",
+ )
diff --git a/tests/api_resources/threads/runs/__init__.py b/tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py
similarity index 100%
rename from tests/api_resources/threads/runs/__init__.py
rename to tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py
diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
new file mode 100644
index 00000000..33166f69
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
@@ -0,0 +1,237 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics.workspaces import (
+ AgentListResponse,
+ AgentMoveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAgents:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ only_deployed=True,
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_move(self, client: Gradient) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_move_with_all_params(self, client: Gradient) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuids=["example string"],
+ body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_move(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_move(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_move(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid="",
+ )
+
+
+class TestAsyncAgents:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ only_deployed=True,
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list(
+ workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_move(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_move_with_all_params(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuids=["example string"],
+ body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_move(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_move(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move(
+ path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_move(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid="",
+ )
diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py
new file mode 100644
index 00000000..438f97b9
--- /dev/null
+++ b/tests/api_resources/agents/test_api_keys.py
@@ -0,0 +1,574 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import (
+ APIKeyListResponse,
+ APIKeyCreateResponse,
+ APIKeyDeleteResponse,
+ APIKeyUpdateResponse,
+ APIKeyRegenerateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAPIKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ name="Production Key",
+ )
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.api_keys.with_raw_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.api_keys.with_streaming_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_create(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.create(
+ path_agent_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.api_keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.api_keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.update(
+ path_api_key_uuid="",
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.api_keys.with_raw_response.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.api_keys.with_streaming_response.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.list(
+ agent_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.api_keys.with_raw_response.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.api_keys.with_streaming_response.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.delete(
+ api_key_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_regenerate(self, client: Gradient) -> None:
+ api_key = client.agents.api_keys.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_regenerate(self, client: Gradient) -> None:
+ response = client.agents.api_keys.with_raw_response.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_regenerate(self, client: Gradient) -> None:
+ with client.agents.api_keys.with_streaming_response.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_regenerate(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.api_keys.with_raw_response.regenerate(
+ api_key_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+
+class TestAsyncAPIKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ name="Production Key",
+ )
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.api_keys.with_raw_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = await response.parse()
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.api_keys.with_streaming_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.create(
+ path_agent_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.api_keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = await response.parse()
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.api_keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.update(
+ path_api_key_uuid="",
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.api_keys.with_raw_response.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = await response.parse()
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.api_keys.with_streaming_response.list(
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.list(
+ agent_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.api_keys.with_raw_response.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = await response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.api_keys.with_streaming_response.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.delete(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.delete(
+ api_key_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_regenerate(self, async_client: AsyncGradient) -> None:
+ api_key = await async_client.agents.api_keys.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_regenerate(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.api_keys.with_raw_response.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = await response.parse()
+ assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_regenerate(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.api_keys.with_streaming_response.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = await response.parse()
+ assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_regenerate(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.regenerate(
+ api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.api_keys.with_raw_response.regenerate(
+ api_key_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py
new file mode 100644
index 00000000..3ab8adb8
--- /dev/null
+++ b/tests/api_resources/agents/test_evaluation_datasets.py
@@ -0,0 +1,211 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import (
+ EvaluationDatasetCreateResponse,
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestEvaluationDatasets:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ evaluation_dataset = client.agents.evaluation_datasets.create()
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ evaluation_dataset = client.agents.evaluation_datasets.create(
+ dataset_type="EVALUATION_DATASET_TYPE_UNKNOWN",
+ file_upload_dataset={
+ "original_file_name": "example name",
+ "size_in_bytes": "12345",
+ "stored_object_key": "example string",
+ },
+ name="example name",
+ )
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_datasets.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_dataset = response.parse()
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_datasets.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_dataset = response.parse()
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_file_upload_presigned_urls(self, client: Gradient) -> None:
+ evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls()
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_file_upload_presigned_urls_with_all_params(self, client: Gradient) -> None:
+ evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls(
+ files=[
+ {
+ "file_name": "example name",
+ "file_size": "file_size",
+ }
+ ],
+ )
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_file_upload_presigned_urls(self, client: Gradient) -> None:
+ response = client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_dataset = response.parse()
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_file_upload_presigned_urls(self, client: Gradient) -> None:
+ with client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_dataset = response.parse()
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncEvaluationDatasets:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create()
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create(
+ dataset_type="EVALUATION_DATASET_TYPE_UNKNOWN",
+ file_upload_dataset={
+ "original_file_name": "example name",
+ "size_in_bytes": "12345",
+ "stored_object_key": "example string",
+ },
+ name="example name",
+ )
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_datasets.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_dataset = await response.parse()
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_datasets.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_dataset = await response.parse()
+ assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None:
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls()
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_file_upload_presigned_urls_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls(
+ files=[
+ {
+ "file_name": "example name",
+ "file_size": "file_size",
+ }
+ ],
+ )
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_dataset = await response.parse()
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None:
+ async with (
+ async_client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls()
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_dataset = await response.parse()
+ assert_matches_type(
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
+ )
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py
new file mode 100644
index 00000000..e27b820d
--- /dev/null
+++ b/tests/api_resources/agents/test_evaluation_metrics.py
@@ -0,0 +1,157 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import (
+ EvaluationMetricListResponse,
+ EvaluationMetricListRegionsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestEvaluationMetrics:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ evaluation_metric = client.agents.evaluation_metrics.list()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_metric = response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_metric = response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_regions(self, client: Gradient) -> None:
+ evaluation_metric = client.agents.evaluation_metrics.list_regions()
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_regions_with_all_params(self, client: Gradient) -> None:
+ evaluation_metric = client.agents.evaluation_metrics.list_regions(
+ serves_batch=True,
+ serves_inference=True,
+ )
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_regions(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.with_raw_response.list_regions()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_metric = response.parse()
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_regions(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.with_streaming_response.list_regions() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_metric = response.parse()
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncEvaluationMetrics:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ evaluation_metric = await async_client.agents.evaluation_metrics.list()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_metric = await response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_metric = await response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_regions(self, async_client: AsyncGradient) -> None:
+ evaluation_metric = await async_client.agents.evaluation_metrics.list_regions()
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_regions_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_metric = await async_client.agents.evaluation_metrics.list_regions(
+ serves_batch=True,
+ serves_inference=True,
+ )
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_regions(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.with_raw_response.list_regions()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_metric = await response.parse()
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_regions(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.with_streaming_response.list_regions() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_metric = await response.parse()
+ assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
new file mode 100644
index 00000000..faefa31b
--- /dev/null
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -0,0 +1,387 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import (
+ EvaluationRunCreateResponse,
+ EvaluationRunRetrieveResponse,
+ EvaluationRunListResultsResponse,
+ EvaluationRunRetrieveResultsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestEvaluationRuns:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ evaluation_run = client.agents.evaluation_runs.create()
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ evaluation_run = client.agents.evaluation_runs.create(
+ agent_deployment_names=["example string"],
+ agent_uuids=["example string"],
+ run_name="Evaluation Run Name",
+ test_case_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_runs.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_runs.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ evaluation_run = client.agents.evaluation_runs.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.evaluation_runs.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.evaluation_runs.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ client.agents.evaluation_runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_results(self, client: Gradient) -> None:
+ evaluation_run = client.agents.evaluation_runs.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_results_with_all_params(self, client: Gradient) -> None:
+ evaluation_run = client.agents.evaluation_runs.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_results(self, client: Gradient) -> None:
+ response = client.agents.evaluation_runs.with_raw_response.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_results(self, client: Gradient) -> None:
+ with client.agents.evaluation_runs.with_streaming_response.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_results(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ client.agents.evaluation_runs.with_raw_response.list_results(
+ evaluation_run_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_results(self, client: Gradient) -> None:
+ evaluation_run = client.agents.evaluation_runs.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_results(self, client: Gradient) -> None:
+ response = client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_results(self, client: Gradient) -> None:
+ with client.agents.evaluation_runs.with_streaming_response.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve_results(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid="",
+ )
+
+
+class TestAsyncEvaluationRuns:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.create()
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.create(
+ agent_deployment_names=["example string"],
+ agent_uuids=["example string"],
+ run_name="Evaluation Run Name",
+ test_case_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_runs.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_runs.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_runs.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_runs.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ await async_client.agents.evaluation_runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_results(self, async_client: AsyncGradient) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_results_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_results(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_runs.with_raw_response.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_results(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_runs.with_streaming_response.list_results(
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_results(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ await async_client.agents.evaluation_runs.with_raw_response.list_results(
+ evaluation_run_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_results(self, async_client: AsyncGradient) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_results(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_results(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve_results(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=1,
+ evaluation_run_uuid="",
+ )
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
new file mode 100644
index 00000000..03c9af54
--- /dev/null
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -0,0 +1,510 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import (
+ EvaluationTestCaseListResponse,
+ EvaluationTestCaseCreateResponse,
+ EvaluationTestCaseUpdateResponse,
+ EvaluationTestCaseRetrieveResponse,
+ EvaluationTestCaseListEvaluationRunsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestEvaluationTestCases:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.create()
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.create(
+ agent_workspace_name="example name",
+ dataset_uuid="123e4567-e89b-12d3-a456-426614174000",
+ description="example string",
+ metrics=["example string"],
+ name="example name",
+ star_metric={
+ "metric_uuid": "123e4567-e89b-12d3-a456-426614174000",
+ "name": "example name",
+ "success_threshold": 123,
+ "success_threshold_pct": 123,
+ },
+ workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_test_cases.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_test_cases.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ evaluation_test_case_version=0,
+ )
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.evaluation_test_cases.with_raw_response.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.evaluation_test_cases.with_streaming_response.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
+ client.agents.evaluation_test_cases.with_raw_response.retrieve(
+ test_case_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ dataset_uuid="123e4567-e89b-12d3-a456-426614174000",
+ description="example string",
+ metrics={"metric_uuids": ["example string"]},
+ name="example name",
+ star_metric={
+ "metric_uuid": "123e4567-e89b-12d3-a456-426614174000",
+ "name": "example name",
+ "success_threshold": 123,
+ "success_threshold_pct": 123,
+ },
+ body_test_case_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.evaluation_test_cases.with_raw_response.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.evaluation_test_cases.with_streaming_response.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"):
+ client.agents.evaluation_test_cases.with_raw_response.update(
+ path_test_case_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.list()
+ assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.evaluation_test_cases.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.evaluation_test_cases.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_evaluation_runs(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_evaluation_runs_with_all_params(self, client: Gradient) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ evaluation_test_case_version=0,
+ )
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_evaluation_runs(self, client: Gradient) -> None:
+ response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_evaluation_runs(self, client: Gradient) -> None:
+ with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = response.parse()
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_evaluation_runs(self, client: Gradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''"
+ ):
+ client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ evaluation_test_case_uuid="",
+ )
+
+
+class TestAsyncEvaluationTestCases:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.create()
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.create(
+ agent_workspace_name="example name",
+ dataset_uuid="123e4567-e89b-12d3-a456-426614174000",
+ description="example string",
+ metrics=["example string"],
+ name="example name",
+ star_metric={
+ "metric_uuid": "123e4567-e89b-12d3-a456-426614174000",
+ "name": "example name",
+ "success_threshold": 123,
+ "success_threshold_pct": 123,
+ },
+ workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ evaluation_test_case_version=0,
+ )
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve(
+ test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
+ await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
+ test_case_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ dataset_uuid="123e4567-e89b-12d3-a456-426614174000",
+ description="example string",
+ metrics={"metric_uuids": ["example string"]},
+ name="example name",
+ star_metric={
+ "metric_uuid": "123e4567-e89b-12d3-a456-426614174000",
+ "name": "example name",
+ "success_threshold": 123,
+ "success_threshold_pct": 123,
+ },
+ body_test_case_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.update(
+ path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"):
+ await async_client.agents.evaluation_test_cases.with_raw_response.update(
+ path_test_case_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.list()
+ assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradient) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ evaluation_test_case_version=0,
+ )
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
+ evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_test_case = await response.parse()
+ assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''"
+ ):
+ await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ evaluation_test_case_uuid="",
+ )
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py
new file mode 100644
index 00000000..6b1fb5a0
--- /dev/null
+++ b/tests/api_resources/agents/test_functions.py
@@ -0,0 +1,384 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import (
+ FunctionCreateResponse,
+ FunctionDeleteResponse,
+ FunctionUpdateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestFunctions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ function = client.agents.functions.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ function = client.agents.functions.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ description='"My Function Description"',
+ faas_name='"my-function"',
+ faas_namespace='"default"',
+ function_name='"My Function"',
+ input_schema={},
+ output_schema={},
+ )
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.functions.with_raw_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ function = response.parse()
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.functions.with_streaming_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ function = response.parse()
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_create(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ client.agents.functions.with_raw_response.create(
+ path_agent_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ function = client.agents.functions.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ function = client.agents.functions.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ description='"My Function Description"',
+ faas_name='"my-function"',
+ faas_namespace='"default"',
+ function_name='"My Function"',
+ body_function_uuid='"12345678-1234-1234-1234-123456789012"',
+ input_schema={},
+ output_schema={},
+ )
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.functions.with_raw_response.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ function = response.parse()
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.functions.with_streaming_response.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ function = response.parse()
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ client.agents.functions.with_raw_response.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"):
+ client.agents.functions.with_raw_response.update(
+ path_function_uuid="",
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ function = client.agents.functions.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(FunctionDeleteResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.functions.with_raw_response.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ function = response.parse()
+ assert_matches_type(FunctionDeleteResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.functions.with_streaming_response.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ function = response.parse()
+ assert_matches_type(FunctionDeleteResponse, function, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ client.agents.functions.with_raw_response.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"):
+ client.agents.functions.with_raw_response.delete(
+ function_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+
+class TestAsyncFunctions:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ function = await async_client.agents.functions.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ function = await async_client.agents.functions.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ description='"My Function Description"',
+ faas_name='"my-function"',
+ faas_namespace='"default"',
+ function_name='"My Function"',
+ input_schema={},
+ output_schema={},
+ )
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.functions.with_raw_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ function = await response.parse()
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.functions.with_streaming_response.create(
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ function = await response.parse()
+ assert_matches_type(FunctionCreateResponse, function, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ await async_client.agents.functions.with_raw_response.create(
+ path_agent_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ function = await async_client.agents.functions.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ function = await async_client.agents.functions.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ description='"My Function Description"',
+ faas_name='"my-function"',
+ faas_namespace='"default"',
+ function_name='"My Function"',
+ body_function_uuid='"12345678-1234-1234-1234-123456789012"',
+ input_schema={},
+ output_schema={},
+ )
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.functions.with_raw_response.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ function = await response.parse()
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.functions.with_streaming_response.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ function = await response.parse()
+ assert_matches_type(FunctionUpdateResponse, function, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
+ await async_client.agents.functions.with_raw_response.update(
+ path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"):
+ await async_client.agents.functions.with_raw_response.update(
+ path_function_uuid="",
+ path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ function = await async_client.agents.functions.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(FunctionDeleteResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.functions.with_raw_response.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ function = await response.parse()
+ assert_matches_type(FunctionDeleteResponse, function, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.functions.with_streaming_response.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ function = await response.parse()
+ assert_matches_type(FunctionDeleteResponse, function, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ await async_client.agents.functions.with_raw_response.delete(
+ function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"):
+ await async_client.agents.functions.with_raw_response.delete(
+ function_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
new file mode 100644
index 00000000..c773fd94
--- /dev/null
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -0,0 +1,316 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKnowledgeBases:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_attach(self, client: Gradient) -> None:
+ knowledge_base = client.agents.knowledge_bases.attach(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_attach(self, client: Gradient) -> None:
+ response = client.agents.knowledge_bases.with_raw_response.attach(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_attach(self, client: Gradient) -> None:
+ with client.agents.knowledge_bases.with_streaming_response.attach(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_attach(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ client.agents.knowledge_bases.with_raw_response.attach(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_attach_single(self, client: Gradient) -> None:
+ knowledge_base = client.agents.knowledge_bases.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_attach_single(self, client: Gradient) -> None:
+ response = client.agents.knowledge_bases.with_raw_response.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_attach_single(self, client: Gradient) -> None:
+ with client.agents.knowledge_bases.with_streaming_response.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_attach_single(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ client.agents.knowledge_bases.with_raw_response.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ client.agents.knowledge_bases.with_raw_response.attach_single(
+ knowledge_base_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_detach(self, client: Gradient) -> None:
+ knowledge_base = client.agents.knowledge_bases.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_detach(self, client: Gradient) -> None:
+ response = client.agents.knowledge_bases.with_raw_response.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_detach(self, client: Gradient) -> None:
+ with client.agents.knowledge_bases.with_streaming_response.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_detach(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ client.agents.knowledge_bases.with_raw_response.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ client.agents.knowledge_bases.with_raw_response.detach(
+ knowledge_base_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+
+class TestAsyncKnowledgeBases:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_attach(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.agents.knowledge_bases.attach(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_attach(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.knowledge_bases.with_raw_response.attach(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_attach(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.knowledge_bases.with_streaming_response.attach(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_attach(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ await async_client.agents.knowledge_bases.with_raw_response.attach(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_attach_single(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.agents.knowledge_bases.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_attach_single(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.knowledge_bases.with_raw_response.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_attach_single(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.knowledge_bases.with_streaming_response.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_attach_single(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ await async_client.agents.knowledge_bases.with_raw_response.attach_single(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.agents.knowledge_bases.with_raw_response.attach_single(
+ knowledge_base_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_detach(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.agents.knowledge_bases.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_detach(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.knowledge_bases.with_raw_response.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_detach(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.knowledge_bases.with_streaming_response.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_detach(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+ await async_client.agents.knowledge_bases.with_raw_response.detach(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.agents.knowledge_bases.with_raw_response.detach(
+ knowledge_base_uuid="",
+ agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py
new file mode 100644
index 00000000..4edc5f73
--- /dev/null
+++ b/tests/api_resources/agents/test_routes.py
@@ -0,0 +1,487 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import (
+ RouteAddResponse,
+ RouteViewResponse,
+ RouteDeleteResponse,
+ RouteUpdateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRoutes:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ route = client.agents.routes.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ route = client.agents.routes.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ if_case='"use this to get weather information"',
+ body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ route_name='"weather_route"',
+ uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.routes.with_raw_response.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.routes.with_streaming_response.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+ ):
+ client.agents.routes.with_raw_response.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+ client.agents.routes.with_raw_response.update(
+ path_child_agent_uuid="",
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ route = client.agents.routes.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.routes.with_raw_response.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.routes.with_streaming_response.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
+ client.agents.routes.with_raw_response.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
+ client.agents.routes.with_raw_response.delete(
+ child_agent_uuid="",
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add(self, client: Gradient) -> None:
+ route = client.agents.routes.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add_with_all_params(self, client: Gradient) -> None:
+ route = client.agents.routes.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ if_case='"use this to get weather information"',
+ body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ route_name='"weather_route"',
+ )
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_add(self, client: Gradient) -> None:
+ response = client.agents.routes.with_raw_response.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_add(self, client: Gradient) -> None:
+ with client.agents.routes.with_streaming_response.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_add(self, client: Gradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+ ):
+ client.agents.routes.with_raw_response.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+ client.agents.routes.with_raw_response.add(
+ path_child_agent_uuid="",
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_view(self, client: Gradient) -> None:
+ route = client.agents.routes.view(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteViewResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_view(self, client: Gradient) -> None:
+ response = client.agents.routes.with_raw_response.view(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_view(self, client: Gradient) -> None:
+ with client.agents.routes.with_streaming_response.view(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_view(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.routes.with_raw_response.view(
+ "",
+ )
+
+
+class TestAsyncRoutes:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ route = await async_client.agents.routes.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ route = await async_client.agents.routes.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ if_case='"use this to get weather information"',
+ body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ route_name='"weather_route"',
+ uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.routes.with_raw_response.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = await response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.routes.with_streaming_response.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = await response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+ ):
+ await async_client.agents.routes.with_raw_response.update(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+ await async_client.agents.routes.with_raw_response.update(
+ path_child_agent_uuid="",
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ route = await async_client.agents.routes.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.routes.with_raw_response.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = await response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.routes.with_streaming_response.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = await response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
+ await async_client.agents.routes.with_raw_response.delete(
+ child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ parent_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
+ await async_client.agents.routes.with_raw_response.delete(
+ child_agent_uuid="",
+ parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add(self, async_client: AsyncGradient) -> None:
+ route = await async_client.agents.routes.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> None:
+ route = await async_client.agents.routes.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ if_case='"use this to get weather information"',
+ body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+ route_name='"weather_route"',
+ )
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.routes.with_raw_response.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = await response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.routes.with_streaming_response.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = await response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+ ):
+ await async_client.agents.routes.with_raw_response.add(
+ path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ path_parent_agent_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+ await async_client.agents.routes.with_raw_response.add(
+ path_child_agent_uuid="",
+ path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_view(self, async_client: AsyncGradient) -> None:
+ route = await async_client.agents.routes.view(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(RouteViewResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_view(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.routes.with_raw_response.view(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ route = await response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_view(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.routes.with_streaming_response.view(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ route = await response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_view(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.routes.with_raw_response.view(
+ "",
+ )
diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py
new file mode 100644
index 00000000..ab10c5e4
--- /dev/null
+++ b/tests/api_resources/agents/test_versions.py
@@ -0,0 +1,232 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents import VersionListResponse, VersionUpdateResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestVersions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ version = client.agents.versions.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ version = client.agents.versions.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73",
+ )
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.versions.with_raw_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ version = response.parse()
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.versions.with_streaming_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ version = response.parse()
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ client.agents.versions.with_raw_response.update(
+ path_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ version = client.agents.versions.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ version = client.agents.versions.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.versions.with_raw_response.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ version = response.parse()
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.versions.with_streaming_response.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ version = response.parse()
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.versions.with_raw_response.list(
+ uuid="",
+ )
+
+
+class TestAsyncVersions:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ version = await async_client.agents.versions.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ version = await async_client.agents.versions.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73",
+ )
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.versions.with_raw_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ version = await response.parse()
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.versions.with_streaming_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ version = await response.parse()
+ assert_matches_type(VersionUpdateResponse, version, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ await async_client.agents.versions.with_raw_response.update(
+ path_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ version = await async_client.agents.versions.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ version = await async_client.agents.versions.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.versions.with_raw_response.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ version = await response.parse()
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.versions.with_streaming_response.list(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ version = await response.parse()
+ assert_matches_type(VersionListResponse, version, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.versions.with_raw_response.list(
+ uuid="",
+ )
diff --git a/tests/api_resources/vector_stores/__init__.py b/tests/api_resources/apps/__init__.py
similarity index 100%
rename from tests/api_resources/vector_stores/__init__.py
rename to tests/api_resources/apps/__init__.py
diff --git a/tests/api_resources/apps/test_job_invocations.py b/tests/api_resources/apps/test_job_invocations.py
new file mode 100644
index 00000000..388be266
--- /dev/null
+++ b/tests/api_resources/apps/test_job_invocations.py
@@ -0,0 +1,148 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.apps import JobInvocationCancelResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestJobInvocations:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_cancel(self, client: Gradient) -> None:
+ job_invocation = client.apps.job_invocations.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ )
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_cancel_with_all_params(self, client: Gradient) -> None:
+ job_invocation = client.apps.job_invocations.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ job_name="job_name",
+ )
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_cancel(self, client: Gradient) -> None:
+ response = client.apps.job_invocations.with_raw_response.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ job_invocation = response.parse()
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_cancel(self, client: Gradient) -> None:
+ with client.apps.job_invocations.with_streaming_response.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ job_invocation = response.parse()
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_cancel(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `app_id` but received ''"):
+ client.apps.job_invocations.with_raw_response.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_invocation_id` but received ''"):
+ client.apps.job_invocations.with_raw_response.cancel(
+ job_invocation_id="",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ )
+
+
+class TestAsyncJobInvocations:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_cancel(self, async_client: AsyncGradient) -> None:
+ job_invocation = await async_client.apps.job_invocations.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ )
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_cancel_with_all_params(self, async_client: AsyncGradient) -> None:
+ job_invocation = await async_client.apps.job_invocations.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ job_name="job_name",
+ )
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_cancel(self, async_client: AsyncGradient) -> None:
+ response = await async_client.apps.job_invocations.with_raw_response.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ job_invocation = await response.parse()
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_cancel(self, async_client: AsyncGradient) -> None:
+ async with async_client.apps.job_invocations.with_streaming_response.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ job_invocation = await response.parse()
+ assert_matches_type(JobInvocationCancelResponse, job_invocation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_cancel(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `app_id` but received ''"):
+ await async_client.apps.job_invocations.with_raw_response.cancel(
+ job_invocation_id="123e4567-e89b-12d3-a456-426",
+ app_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_invocation_id` but received ''"):
+ await async_client.apps.job_invocations.with_raw_response.cancel(
+ job_invocation_id="",
+ app_id="4f6c71e2-1e90-4762-9fee-6cc4a0a9f2cf",
+ )
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index b065b83d..54e98640 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -7,14 +7,9 @@
import pytest
+from gradient import Gradient, AsyncGradient
from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.chat import (
- CreateResponse,
- CompletionListResponse,
- CompletionDeleteResponse,
- CompletionListMessagesResponse,
-)
+from gradient.types.chat import CompletionCreateResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -22,65 +17,42 @@
class TestCompletions:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_method_create_overload_1(self, client: Gradient) -> None:
completion = client.chat.completions.create(
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
completion = client.chat.completions.create(
messages=[
{
"content": "string",
- "role": "developer",
- "name": "name",
+ "role": "system",
}
],
- model="gpt-4o",
- audio={
- "format": "wav",
- "voice": "ash",
- },
+ model="llama3-8b-instruct",
frequency_penalty=-2,
- function_call="none",
- functions=[
- {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- }
- ],
logit_bias={"foo": 0},
logprobs=True,
- max_completion_tokens=0,
+ max_completion_tokens=256,
max_tokens=0,
metadata={"foo": "string"},
- modalities=["text"],
n=1,
- parallel_tool_calls=True,
- prediction={
- "content": "string",
- "type": "content",
- },
presence_penalty=-2,
- reasoning_effort="low",
- response_format={"type": "text"},
- seed=0,
- service_tier="auto",
+ reasoning_effort="none",
stop="\n",
- store=True,
- stream=True,
+ stream=False,
stream_options={"include_usage": True},
temperature=1,
tool_choice="none",
@@ -90,7 +62,6 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
"name": "name",
"description": "description",
"parameters": {"foo": "bar"},
- "strict": True,
},
"type": "function",
}
@@ -98,345 +69,184 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
top_logprobs=0,
top_p=1,
user="user-1234",
- web_search_options={
- "search_context_size": "low",
- "user_location": {
- "approximate": {
- "city": "city",
- "country": "country",
- "region": "region",
- "timezone": "timezone",
- },
- "type": "approximate",
- },
- },
)
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
response = client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
with client.chat.completions.with_streaming_response.create(
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.retrieve(
- "completion_id",
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.retrieve(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.retrieve(
- "completion_id",
+ model="llama3-8b-instruct",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ completion_stream = client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
)
+ completion_stream.response.close()
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.update(
- completion_id="",
- metadata={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list(
- after="after",
- limit=0,
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ completion_stream = client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ frequency_penalty=-2,
+ logit_bias={"foo": 0},
+ logprobs=True,
+ max_completion_tokens=256,
+ max_tokens=0,
metadata={"foo": "string"},
- model="model",
- order="asc",
- )
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.delete(
- "completion_id",
- )
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.delete(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.delete(
- "completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list_messages(
- completion_id="completion_id",
- )
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_messages_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.chat.completions.list_messages(
- completion_id="completion_id",
- after="after",
- limit=0,
- order="asc",
+ n=1,
+ presence_penalty=-2,
+ reasoning_effort="none",
+ stop="\n",
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "function": {
+ "name": "name",
+ "description": "description",
+ "parameters": {"foo": "bar"},
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=0,
+ top_p=1,
+ user="user-1234",
)
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
+ completion_stream.response.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.chat.completions.with_raw_response.list_messages(
- completion_id="completion_id",
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
)
- assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
+ stream = response.parse()
+ stream.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_streaming_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- with client.chat.completions.with_streaming_response.list_messages(
- completion_id="completion_id",
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
+ stream = response.parse()
+ stream.close()
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_messages(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- client.chat.completions.with_raw_response.list_messages(
- completion_id="",
- )
-
class TestAsyncCompletions:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
completion = await async_client.chat.completions.create(
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
completion = await async_client.chat.completions.create(
messages=[
{
"content": "string",
- "role": "developer",
- "name": "name",
+ "role": "system",
}
],
- model="gpt-4o",
- audio={
- "format": "wav",
- "voice": "ash",
- },
+ model="llama3-8b-instruct",
frequency_penalty=-2,
- function_call="none",
- functions=[
- {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- }
- ],
logit_bias={"foo": 0},
logprobs=True,
- max_completion_tokens=0,
+ max_completion_tokens=256,
max_tokens=0,
metadata={"foo": "string"},
- modalities=["text"],
n=1,
- parallel_tool_calls=True,
- prediction={
- "content": "string",
- "type": "content",
- },
presence_penalty=-2,
- reasoning_effort="low",
- response_format={"type": "text"},
- seed=0,
- service_tier="auto",
+ reasoning_effort="none",
stop="\n",
- store=True,
- stream=True,
+ stream=False,
stream_options={"include_usage": True},
temperature=1,
tool_choice="none",
@@ -446,7 +256,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
"name": "name",
"description": "description",
"parameters": {"foo": "bar"},
- "strict": True,
},
"type": "function",
}
@@ -454,278 +263,138 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
top_logprobs=0,
top_p=1,
user="user-1234",
- web_search_options={
- "search_context_size": "low",
- "user_location": {
- "approximate": {
- "city": "city",
- "country": "country",
- "region": "region",
- "timezone": "timezone",
- },
- "type": "approximate",
- },
- },
)
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
response = await async_client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
+ model="llama3-8b-instruct",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
async with async_client.chat.completions.with_streaming_response.create(
messages=[
{
"content": "string",
- "role": "developer",
+ "role": "system",
}
],
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.retrieve(
- "completion_id",
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.retrieve(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.retrieve(
- "completion_id",
+ model="llama3-8b-instruct",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
+ assert_matches_type(CompletionCreateResponse, completion, path=["response"])
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ completion_stream = await async_client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
)
+ await completion_stream.response.aclose()
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.update(
- completion_id="completion_id",
- metadata={"foo": "string"},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.update(
- completion_id="",
- metadata={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list(
- after="after",
- limit=0,
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ completion_stream = await async_client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
+ frequency_penalty=-2,
+ logit_bias={"foo": 0},
+ logprobs=True,
+ max_completion_tokens=256,
+ max_tokens=0,
metadata={"foo": "string"},
- model="model",
- order="asc",
- )
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CompletionListResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.delete(
- "completion_id",
- )
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.delete(
- "completion_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.delete(
- "completion_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CompletionDeleteResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list_messages(
- completion_id="completion_id",
- )
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_messages_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.chat.completions.list_messages(
- completion_id="completion_id",
- after="after",
- limit=0,
- order="asc",
+ n=1,
+ presence_penalty=-2,
+ reasoning_effort="none",
+ stop="\n",
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "function": {
+ "name": "name",
+ "description": "description",
+ "parameters": {"foo": "bar"},
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=0,
+ top_p=1,
+ user="user-1234",
)
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
+ await completion_stream.response.aclose()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.chat.completions.with_raw_response.list_messages(
- completion_id="completion_id",
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
)
- assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
+ stream = await response.parse()
+ await stream.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_streaming_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.chat.completions.with_streaming_response.list_messages(
- completion_id="completion_id",
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionListMessagesResponse, completion, path=["response"])
+ stream = await response.parse()
+ await stream.close()
assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
- await async_client.chat.completions.with_raw_response.list_messages(
- completion_id="",
- )
diff --git a/tests/api_resources/databases/__init__.py b/tests/api_resources/databases/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/databases/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/databases/schema_registry/__init__.py b/tests/api_resources/databases/schema_registry/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/databases/schema_registry/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/databases/schema_registry/test_config.py b/tests/api_resources/databases/schema_registry/test_config.py
new file mode 100644
index 00000000..c58a6ad9
--- /dev/null
+++ b/tests/api_resources/databases/schema_registry/test_config.py
@@ -0,0 +1,423 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.databases.schema_registry import (
+ ConfigUpdateResponse,
+ ConfigRetrieveResponse,
+ ConfigUpdateSubjectResponse,
+ ConfigRetrieveSubjectResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestConfig:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ config = client.databases.schema_registry.config.retrieve(
+ "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.retrieve(
+ "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.retrieve(
+ "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ config = client.databases.schema_registry.config.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_subject(self, client: Gradient) -> None:
+ config = client.databases.schema_registry.config.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_subject(self, client: Gradient) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_subject(self, client: Gradient) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve_subject(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_subject(self, client: Gradient) -> None:
+ config = client.databases.schema_registry.config.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update_subject(self, client: Gradient) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update_subject(self, client: Gradient) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update_subject(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+
+class TestAsyncConfig:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ config = await async_client.databases.schema_registry.config.retrieve(
+ "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.retrieve(
+ "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.retrieve(
+ "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ config = await async_client.databases.schema_registry.config.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_subject(self, async_client: AsyncGradient) -> None:
+ config = await async_client.databases.schema_registry.config.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_subject(self, async_client: AsyncGradient) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_subject(self, async_client: AsyncGradient) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve_subject(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_subject(self, async_client: AsyncGradient) -> None:
+ config = await async_client.databases.schema_registry.config.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update_subject(self, async_client: AsyncGradient) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update_subject(self, async_client: AsyncGradient) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update_subject(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
deleted file mode 100644
index 1983d90a..00000000
--- a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
+++ /dev/null
@@ -1,309 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning.checkpoints import (
- PermissionDeleteResponse,
- ListFineTuningCheckpointPermission,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestPermissions:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.checkpoints.permissions.with_streaming_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="",
- project_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- order="ascending",
- project_id="project_id",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- permission = client.fine_tuning.checkpoints.permissions.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncPermissions:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.create(
- permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
- project_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create(
- permission_id="",
- project_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- order="ascending",
- project_id="project_id",
- )
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
- permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = await response.parse()
- assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
- permission_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- permission = await response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
- "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- permission = await response.parse()
- assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
- await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py
deleted file mode 100644
index f94416f9..00000000
--- a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestCheckpoints:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- checkpoint = client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- checkpoint = client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- checkpoint = response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- checkpoint = response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
-
-
-class TestAsyncCheckpoints:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- checkpoint = await response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- checkpoint = await response.parse()
- assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
diff --git a/tests/api_resources/fine_tuning/jobs/test_events.py b/tests/api_resources/fine_tuning/jobs/test_events.py
deleted file mode 100644
index 39802767..00000000
--- a/tests/api_resources/fine_tuning/jobs/test_events.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestEvents:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- event = client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- event = client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- event = response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.events.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- event = response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
-
-
-class TestAsyncEvents:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- event = await async_client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- event = await async_client.fine_tuning.jobs.events.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- after="after",
- limit=0,
- )
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- event = await response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.events.with_streaming_response.retrieve(
- fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- event = await response.parse()
- assert_matches_type(EventRetrieveResponse, event, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.events.with_raw_response.retrieve(
- fine_tuning_job_id="",
- )
diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py
deleted file mode 100644
index f0014f09..00000000
--- a/tests/api_resources/fine_tuning/test_jobs.py
+++ /dev/null
@@ -1,437 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning import (
- FineTuningJob,
- JobListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestJobs:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- hyperparameters={
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- },
- integrations=[
- {
- "type": "wandb",
- "wandb": {
- "project": "my-wandb-project",
- "entity": "entity",
- "name": "name",
- "tags": ["custom-tag"],
- },
- }
- ],
- metadata={"foo": "string"},
- method={
- "dpo": {
- "hyperparameters": {
- "batch_size": "auto",
- "beta": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "supervised": {
- "hyperparameters": {
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "type": "supervised",
- },
- seed=42,
- suffix="x",
- validation_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.list()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.list(
- after="after",
- limit=0,
- metadata={"foo": "string"},
- )
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- job = client.fine_tuning.jobs.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.fine_tuning.jobs.with_raw_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.fine_tuning.jobs.with_streaming_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- client.fine_tuning.jobs.with_raw_response.cancel(
- "",
- )
-
-
-class TestAsyncJobs:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- hyperparameters={
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- },
- integrations=[
- {
- "type": "wandb",
- "wandb": {
- "project": "my-wandb-project",
- "entity": "entity",
- "name": "name",
- "tags": ["custom-tag"],
- },
- }
- ],
- metadata={"foo": "string"},
- method={
- "dpo": {
- "hyperparameters": {
- "batch_size": "auto",
- "beta": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "supervised": {
- "hyperparameters": {
- "batch_size": "auto",
- "learning_rate_multiplier": "auto",
- "n_epochs": "auto",
- }
- },
- "type": "supervised",
- },
- seed=42,
- suffix="x",
- validation_file="file-abc123",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.create(
- model="gpt-4o-mini",
- training_file="file-abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.list()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.list(
- after="after",
- limit=0,
- metadata={"foo": "string"},
- )
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- job = await async_client.fine_tuning.jobs.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.fine_tuning.jobs.with_raw_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.fine_tuning.jobs.with_streaming_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(FineTuningJob, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
- await async_client.fine_tuning.jobs.with_raw_response.cancel(
- "",
- )
diff --git a/tests/api_resources/gpu_droplets/__init__.py b/tests/api_resources/gpu_droplets/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/gpu_droplets/account/__init__.py b/tests/api_resources/gpu_droplets/account/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/account/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py
new file mode 100644
index 00000000..42702d3a
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/account/test_keys.py
@@ -0,0 +1,399 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets.account import (
+ KeyListResponse,
+ KeyCreateResponse,
+ KeyUpdateResponse,
+ KeyRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ key = client.gpu_droplets.account.keys.create(
+ name="My SSH Public Key",
+ public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.gpu_droplets.account.keys.with_raw_response.create(
+ name="My SSH Public Key",
+ public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.gpu_droplets.account.keys.with_streaming_response.create(
+ name="My SSH Public Key",
+ public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ key = client.gpu_droplets.account.keys.retrieve(
+ 512189,
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.account.keys.with_raw_response.retrieve(
+ 512189,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.account.keys.with_streaming_response.retrieve(
+ 512189,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ key = client.gpu_droplets.account.keys.update(
+ ssh_key_identifier=512189,
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ key = client.gpu_droplets.account.keys.update(
+ ssh_key_identifier=512189,
+ name="My SSH Public Key",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.gpu_droplets.account.keys.with_raw_response.update(
+ ssh_key_identifier=512189,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.gpu_droplets.account.keys.with_streaming_response.update(
+ ssh_key_identifier=512189,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ key = client.gpu_droplets.account.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ key = client.gpu_droplets.account.keys.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.account.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.account.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ key = client.gpu_droplets.account.keys.delete(
+ 512189,
+ )
+ assert key is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.account.keys.with_raw_response.delete(
+ 512189,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert key is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.account.keys.with_streaming_response.delete(
+ 512189,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert key is None
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ key = await async_client.gpu_droplets.account.keys.create(
+ name="My SSH Public Key",
+ public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.account.keys.with_raw_response.create(
+ name="My SSH Public Key",
+ public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.account.keys.with_streaming_response.create(
+ name="My SSH Public Key",
+ public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ key = await async_client.gpu_droplets.account.keys.retrieve(
+ 512189,
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.account.keys.with_raw_response.retrieve(
+ 512189,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.account.keys.with_streaming_response.retrieve(
+ 512189,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ key = await async_client.gpu_droplets.account.keys.update(
+ ssh_key_identifier=512189,
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.gpu_droplets.account.keys.update(
+ ssh_key_identifier=512189,
+ name="My SSH Public Key",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.account.keys.with_raw_response.update(
+ ssh_key_identifier=512189,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.account.keys.with_streaming_response.update(
+ ssh_key_identifier=512189,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ key = await async_client.gpu_droplets.account.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ key = await async_client.gpu_droplets.account.keys.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.account.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.account.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ key = await async_client.gpu_droplets.account.keys.delete(
+ 512189,
+ )
+ assert key is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.account.keys.with_raw_response.delete(
+ 512189,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert key is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.account.keys.with_streaming_response.delete(
+ 512189,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert key is None
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/firewalls/__init__.py b/tests/api_resources/gpu_droplets/firewalls/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/firewalls/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
new file mode 100644
index 00000000..f75aeaf6
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
@@ -0,0 +1,206 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestDroplets:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add(self, client: Gradient) -> None:
+ droplet = client.gpu_droplets.firewalls.droplets.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_add(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.droplets.with_raw_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_add(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.droplets.with_streaming_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_add(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.droplets.with_raw_response.add(
+ firewall_id="",
+ droplet_ids=[49696269],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_remove(self, client: Gradient) -> None:
+ droplet = client.gpu_droplets.firewalls.droplets.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_remove(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_remove(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.droplets.with_streaming_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_remove(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
+ firewall_id="",
+ droplet_ids=[49696269],
+ )
+
+
+class TestAsyncDroplets:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add(self, async_client: AsyncGradient) -> None:
+ droplet = await async_client.gpu_droplets.firewalls.droplets.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = await response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = await response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add(
+ firewall_id="",
+ droplet_ids=[49696269],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_remove(self, async_client: AsyncGradient) -> None:
+ droplet = await async_client.gpu_droplets.firewalls.droplets.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = await response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ droplet_ids=[49696269],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = await response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
+ firewall_id="",
+ droplet_ids=[49696269],
+ )
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
new file mode 100644
index 00000000..2c04b390
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
@@ -0,0 +1,326 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRules:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add(self, client: Gradient) -> None:
+ rule = client.gpu_droplets.firewalls.rules.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add_with_all_params(self, client: Gradient) -> None:
+ rule = client.gpu_droplets.firewalls.rules.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ inbound_rules=[
+ {
+ "ports": "3306",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ }
+ ],
+ outbound_rules=[
+ {
+ "destinations": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "3306",
+ "protocol": "tcp",
+ }
+ ],
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_add(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.rules.with_raw_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ rule = response.parse()
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_add(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.rules.with_streaming_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ rule = response.parse()
+ assert rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_add(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.rules.with_raw_response.add(
+ firewall_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_remove(self, client: Gradient) -> None:
+ rule = client.gpu_droplets.firewalls.rules.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_remove_with_all_params(self, client: Gradient) -> None:
+ rule = client.gpu_droplets.firewalls.rules.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ inbound_rules=[
+ {
+ "ports": "3306",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ }
+ ],
+ outbound_rules=[
+ {
+ "destinations": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "3306",
+ "protocol": "tcp",
+ }
+ ],
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_remove(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.rules.with_raw_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ rule = response.parse()
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_remove(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.rules.with_streaming_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ rule = response.parse()
+ assert rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_remove(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.rules.with_raw_response.remove(
+ firewall_id="",
+ )
+
+
+class TestAsyncRules:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add(self, async_client: AsyncGradient) -> None:
+ rule = await async_client.gpu_droplets.firewalls.rules.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> None:
+ rule = await async_client.gpu_droplets.firewalls.rules.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ inbound_rules=[
+ {
+ "ports": "3306",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ }
+ ],
+ outbound_rules=[
+ {
+ "destinations": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "3306",
+ "protocol": "tcp",
+ }
+ ],
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ rule = await response.parse()
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ rule = await response.parse()
+ assert rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.rules.with_raw_response.add(
+ firewall_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_remove(self, async_client: AsyncGradient) -> None:
+ rule = await async_client.gpu_droplets.firewalls.rules.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_remove_with_all_params(self, async_client: AsyncGradient) -> None:
+ rule = await async_client.gpu_droplets.firewalls.rules.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ inbound_rules=[
+ {
+ "ports": "3306",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ }
+ ],
+ outbound_rules=[
+ {
+ "destinations": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [49696269],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "3306",
+ "protocol": "tcp",
+ }
+ ],
+ )
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ rule = await response.parse()
+ assert rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ rule = await response.parse()
+ assert rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove(
+ firewall_id="",
+ )
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
new file mode 100644
index 00000000..170c5317
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
@@ -0,0 +1,206 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestTags:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add(self, client: Gradient) -> None:
+ tag = client.gpu_droplets.firewalls.tags.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_add(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.tags.with_raw_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tag = response.parse()
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_add(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.tags.with_streaming_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tag = response.parse()
+ assert tag is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_add(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.tags.with_raw_response.add(
+ firewall_id="",
+ tags=["frontend"],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_remove(self, client: Gradient) -> None:
+ tag = client.gpu_droplets.firewalls.tags.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_remove(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.tags.with_raw_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tag = response.parse()
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_remove(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.tags.with_streaming_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tag = response.parse()
+ assert tag is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_remove(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.tags.with_raw_response.remove(
+ firewall_id="",
+ tags=["frontend"],
+ )
+
+
+class TestAsyncTags:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add(self, async_client: AsyncGradient) -> None:
+ tag = await async_client.gpu_droplets.firewalls.tags.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tag = await response.parse()
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.add(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tag = await response.parse()
+ assert tag is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.tags.with_raw_response.add(
+ firewall_id="",
+ tags=["frontend"],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_remove(self, async_client: AsyncGradient) -> None:
+ tag = await async_client.gpu_droplets.firewalls.tags.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ tag = await response.parse()
+ assert tag is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.remove(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ tags=["frontend"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ tag = await response.parse()
+ assert tag is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove(
+ firewall_id="",
+ tags=["frontend"],
+ )
diff --git a/tests/api_resources/gpu_droplets/floating_ips/__init__.py b/tests/api_resources/gpu_droplets/floating_ips/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/floating_ips/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
new file mode 100644
index 00000000..31376bca
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
@@ -0,0 +1,396 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets.floating_ips import (
+ ActionListResponse,
+ ActionCreateResponse,
+ ActionRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestActions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.floating_ips.actions.create(
+ floating_ip="45.55.96.47",
+ type="assign",
+ )
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="45.55.96.47",
+ type="assign",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
+ floating_ip="45.55.96.47",
+ type="assign",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_create_overload_1(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="",
+ type="assign",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.floating_ips.actions.create(
+ floating_ip="45.55.96.47",
+ droplet_id=758604968,
+ type="assign",
+ )
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="45.55.96.47",
+ droplet_id=758604968,
+ type="assign",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
+ floating_ip="45.55.96.47",
+ droplet_id=758604968,
+ type="assign",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_create_overload_2(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="",
+ droplet_id=758604968,
+ type="assign",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ action = client.gpu_droplets.floating_ips.actions.retrieve(
+ action_id=36804636,
+ floating_ip="45.55.96.47",
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ floating_ip="45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ floating_ip="45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ floating_ip="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ action = client.gpu_droplets.floating_ips.actions.list(
+ "45.55.96.47",
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.actions.with_raw_response.list(
+ "45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.actions.with_streaming_response.list(
+ "45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ client.gpu_droplets.floating_ips.actions.with_raw_response.list(
+ "",
+ )
+
+
+class TestAsyncActions:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.floating_ips.actions.create(
+ floating_ip="45.55.96.47",
+ type="assign",
+ )
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="45.55.96.47",
+ type="assign",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
+ floating_ip="45.55.96.47",
+ type="assign",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_create_overload_1(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="",
+ type="assign",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.floating_ips.actions.create(
+ floating_ip="45.55.96.47",
+ droplet_id=758604968,
+ type="assign",
+ )
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="45.55.96.47",
+ droplet_id=758604968,
+ type="assign",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
+ floating_ip="45.55.96.47",
+ droplet_id=758604968,
+ type="assign",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionCreateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_create_overload_2(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
+ floating_ip="",
+ droplet_id=758604968,
+ type="assign",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.floating_ips.actions.retrieve(
+ action_id=36804636,
+ floating_ip="45.55.96.47",
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ floating_ip="45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ floating_ip="45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ floating_ip="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.floating_ips.actions.list(
+ "45.55.96.47",
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list(
+ "45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.list(
+ "45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list(
+ "",
+ )
diff --git a/tests/api_resources/gpu_droplets/images/__init__.py b/tests/api_resources/gpu_droplets/images/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/images/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py
new file mode 100644
index 00000000..7cc7b4d4
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/images/test_actions.py
@@ -0,0 +1,321 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.shared import Action
+from gradient.types.gpu_droplets.images import ActionListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestActions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.images.actions.create(
+ image_id=62137902,
+ type="convert",
+ )
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.actions.with_raw_response.create(
+ image_id=62137902,
+ type="convert",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.actions.with_streaming_response.create(
+ image_id=62137902,
+ type="convert",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.images.actions.create(
+ image_id=62137902,
+ region="nyc3",
+ type="convert",
+ )
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.actions.with_raw_response.create(
+ image_id=62137902,
+ region="nyc3",
+ type="convert",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.actions.with_streaming_response.create(
+ image_id=62137902,
+ region="nyc3",
+ type="convert",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ action = client.gpu_droplets.images.actions.retrieve(
+ action_id=36804636,
+ image_id=62137902,
+ )
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ image_id=62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ image_id=62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ action = client.gpu_droplets.images.actions.list(
+ 62137902,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.actions.with_raw_response.list(
+ 62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.actions.with_streaming_response.list(
+ 62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncActions:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.images.actions.create(
+ image_id=62137902,
+ type="convert",
+ )
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.actions.with_raw_response.create(
+ image_id=62137902,
+ type="convert",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.actions.with_streaming_response.create(
+ image_id=62137902,
+ type="convert",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.images.actions.create(
+ image_id=62137902,
+ region="nyc3",
+ type="convert",
+ )
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.actions.with_raw_response.create(
+ image_id=62137902,
+ region="nyc3",
+ type="convert",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.actions.with_streaming_response.create(
+ image_id=62137902,
+ region="nyc3",
+ type="convert",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.images.actions.retrieve(
+ action_id=36804636,
+ image_id=62137902,
+ )
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ image_id=62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ image_id=62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(Action, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.images.actions.list(
+ 62137902,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.actions.with_raw_response.list(
+ 62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.actions.with_streaming_response.list(
+ 62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/load_balancers/__init__.py b/tests/api_resources/gpu_droplets/load_balancers/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/load_balancers/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
new file mode 100644
index 00000000..884032dd
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
@@ -0,0 +1,206 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestDroplets:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add(self, client: Gradient) -> None:
+ droplet = client.gpu_droplets.load_balancers.droplets.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_add(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_add(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.droplets.with_streaming_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_add(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
+ lb_id="",
+ droplet_ids=[3164444, 3164445],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_remove(self, client: Gradient) -> None:
+ droplet = client.gpu_droplets.load_balancers.droplets.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_remove(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_remove(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_remove(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
+ lb_id="",
+ droplet_ids=[3164444, 3164445],
+ )
+
+
+class TestAsyncDroplets:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add(self, async_client: AsyncGradient) -> None:
+ droplet = await async_client.gpu_droplets.load_balancers.droplets.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = await response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = await response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
+ lb_id="",
+ droplet_ids=[3164444, 3164445],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_remove(self, async_client: AsyncGradient) -> None:
+ droplet = await async_client.gpu_droplets.load_balancers.droplets.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ droplet = await response.parse()
+ assert droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ droplet_ids=[3164444, 3164445],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ droplet = await response.parse()
+ assert droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
+ lb_id="",
+ droplet_ids=[3164444, 3164445],
+ )
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
new file mode 100644
index 00000000..43498e13
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
@@ -0,0 +1,318 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestForwardingRules:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_add(self, client: Gradient) -> None:
+ forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_add(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ forwarding_rule = response.parse()
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_add(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ forwarding_rule = response.parse()
+ assert forwarding_rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_add(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_remove(self, client: Gradient) -> None:
+ forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_remove(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ forwarding_rule = response.parse()
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_remove(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ forwarding_rule = response.parse()
+ assert forwarding_rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_remove(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+
+class TestAsyncForwardingRules:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_add(self, async_client: AsyncGradient) -> None:
+ forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ forwarding_rule = await response.parse()
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ forwarding_rule = await response.parse()
+ assert forwarding_rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_remove(self, async_client: AsyncGradient) -> None:
+ forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ forwarding_rule = await response.parse()
+ assert forwarding_rule is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ forwarding_rule = await response.parse()
+ assert forwarding_rule is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py
new file mode 100644
index 00000000..897414b7
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_actions.py
@@ -0,0 +1,1209 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ ActionListResponse,
+ ActionInitiateResponse,
+ ActionRetrieveResponse,
+ ActionBulkInitiateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestActions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.retrieve(
+ action_id=36804636,
+ droplet_id=3164444,
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.list(
+ droplet_id=3164444,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.list(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.list(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.list(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_bulk_initiate_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_bulk_initiate_with_all_params_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ tag_name="tag_name",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_bulk_initiate_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.bulk_initiate(
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_bulk_initiate_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_bulk_initiate_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_bulk_initiate_with_all_params_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ tag_name="tag_name",
+ name="Nifty New Snapshot",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_bulk_initiate_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.bulk_initiate(
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_bulk_initiate_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ backup_policy={
+ "hour": 20,
+ "plan": "daily",
+ "weekday": "SUN",
+ },
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_3(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_3(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ backup_policy={
+ "hour": 20,
+ "plan": "weekly",
+ "weekday": "SUN",
+ },
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_3(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_3(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_4(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_4(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ image=12389723,
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_4(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_4(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_5(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_5(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ disk=True,
+ size="s-2vcpu-2gb",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_5(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_5(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_6(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_6(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ image="ubuntu-20-04-x64",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_6(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_6(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_7(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_7(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ name="nifty-new-name",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_7(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_7(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_8(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_8(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ kernel=12389723,
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_8(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_8(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_overload_9(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_with_all_params_overload_9(self, client: Gradient) -> None:
+ action = client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ name="Nifty New Snapshot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_overload_9(self, client: Gradient) -> None:
+ response = client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_overload_9(self, client: Gradient) -> None:
+ with client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncActions:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.retrieve(
+ action_id=36804636,
+ droplet_id=3164444,
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.list(
+ droplet_id=3164444,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.list(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.list(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.list(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_bulk_initiate_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ tag_name="tag_name",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate(
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_bulk_initiate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.bulk_initiate(
+ type="reboot",
+ tag_name="tag_name",
+ name="Nifty New Snapshot",
+ )
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate(
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ backup_policy={
+ "hour": 20,
+ "plan": "daily",
+ "weekday": "SUN",
+ },
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_3(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_3(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ backup_policy={
+ "hour": 20,
+ "plan": "weekly",
+ "weekday": "SUN",
+ },
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_3(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="enable_backups",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_4(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_4(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ image=12389723,
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_4(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_5(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_5(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ disk=True,
+ size="s-2vcpu-2gb",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_5(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_6(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_6(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ image="ubuntu-20-04-x64",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_6(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_7(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_7(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ name="nifty-new-name",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_7(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_8(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_8(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ kernel=12389723,
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_8(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_overload_9(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_with_all_params_overload_9(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.actions.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ name="Nifty New Snapshot",
+ )
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_overload_9(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+ droplet_id=3164444,
+ type="reboot",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py
new file mode 100644
index 00000000..d6322172
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_autoscale.py
@@ -0,0 +1,953 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ AutoscaleListResponse,
+ AutoscaleCreateResponse,
+ AutoscaleUpdateResponse,
+ AutoscaleRetrieveResponse,
+ AutoscaleListHistoryResponse,
+ AutoscaleListMembersResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAutoscale:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ "cooldown_minutes": 10,
+ "target_cpu_utilization": 0.5,
+ "target_memory_utilization": 0.6,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ "ipv6": True,
+ "name": "example.com",
+ "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",
+ "tags": ["env:prod", "web"],
+ "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n",
+ "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ "with_droplet_agent": True,
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.retrieve(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.retrieve(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.retrieve(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ client.gpu_droplets.autoscale.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ "ipv6": True,
+ "name": "example.com",
+ "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",
+ "tags": ["env:prod", "web"],
+ "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n",
+ "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ "with_droplet_agent": True,
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ client.gpu_droplets.autoscale.with_raw_response.update(
+ autoscale_pool_id="",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.list()
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.list(
+ name="name",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.delete(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.delete(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.delete(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert autoscale is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ client.gpu_droplets.autoscale.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_dangerous(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.delete_dangerous(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ x_dangerous=True,
+ )
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete_dangerous(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.delete_dangerous(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ x_dangerous=True,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete_dangerous(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ x_dangerous=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert autoscale is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete_dangerous(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ client.gpu_droplets.autoscale.with_raw_response.delete_dangerous(
+ autoscale_pool_id="",
+ x_dangerous=True,
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_history(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_history_with_all_params(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_history(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_history(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_history(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ client.gpu_droplets.autoscale.with_raw_response.list_history(
+ autoscale_pool_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_members(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_members_with_all_params(self, client: Gradient) -> None:
+ autoscale = client.gpu_droplets.autoscale.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_members(self, client: Gradient) -> None:
+ response = client.gpu_droplets.autoscale.with_raw_response.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_members(self, client: Gradient) -> None:
+ with client.gpu_droplets.autoscale.with_streaming_response.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = response.parse()
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_members(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ client.gpu_droplets.autoscale.with_raw_response.list_members(
+ autoscale_pool_id="",
+ )
+
+
+class TestAsyncAutoscale:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ "cooldown_minutes": 10,
+ "target_cpu_utilization": 0.5,
+ "target_memory_utilization": 0.6,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ "ipv6": True,
+ "name": "example.com",
+ "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",
+ "tags": ["env:prod", "web"],
+ "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n",
+ "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ "with_droplet_agent": True,
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.create(
+ config={
+ "max_instances": 5,
+ "min_instances": 1,
+ },
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.retrieve(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.retrieve(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.retrieve(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ await async_client.gpu_droplets.autoscale.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ "ipv6": True,
+ "name": "example.com",
+ "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",
+ "tags": ["env:prod", "web"],
+ "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n",
+ "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ "with_droplet_agent": True,
+ },
+ name="my-autoscale-pool",
+ )
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.update(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ await async_client.gpu_droplets.autoscale.with_raw_response.update(
+ autoscale_pool_id="",
+ config={"target_number_instances": 2},
+ droplet_template={
+ "image": "ubuntu-20-04-x64",
+ "region": "nyc3",
+ "size": "c-2",
+ "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ },
+ name="my-autoscale-pool",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.list()
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.list(
+ name="name",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.delete(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.delete(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.delete(
+ "0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert autoscale is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ await async_client.gpu_droplets.autoscale.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.delete_dangerous(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ x_dangerous=True,
+ )
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ x_dangerous=True,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert autoscale is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ x_dangerous=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert autoscale is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete_dangerous(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous(
+ autoscale_pool_id="",
+ x_dangerous=True,
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_history(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_history_with_all_params(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_history(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_history(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.list_history(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_history(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ await async_client.gpu_droplets.autoscale.with_raw_response.list_history(
+ autoscale_pool_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_members(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_members_with_all_params(self, async_client: AsyncGradient) -> None:
+ autoscale = await async_client.gpu_droplets.autoscale.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_members(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.autoscale.with_raw_response.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_members(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.autoscale.with_streaming_response.list_members(
+ autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ autoscale = await response.parse()
+ assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_members(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+ await async_client.gpu_droplets.autoscale.with_raw_response.list_members(
+ autoscale_pool_id="",
+ )
diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py
new file mode 100644
index 00000000..c5e8615f
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_backups.py
@@ -0,0 +1,315 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ BackupListResponse,
+ BackupListPoliciesResponse,
+ BackupRetrievePolicyResponse,
+ BackupListSupportedPoliciesResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestBackups:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ backup = client.gpu_droplets.backups.list(
+ droplet_id=3164444,
+ )
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ backup = client.gpu_droplets.backups.list(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.backups.with_raw_response.list(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = response.parse()
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.backups.with_streaming_response.list(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = response.parse()
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_policies(self, client: Gradient) -> None:
+ backup = client.gpu_droplets.backups.list_policies()
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_policies_with_all_params(self, client: Gradient) -> None:
+ backup = client.gpu_droplets.backups.list_policies(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_policies(self, client: Gradient) -> None:
+ response = client.gpu_droplets.backups.with_raw_response.list_policies()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = response.parse()
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_policies(self, client: Gradient) -> None:
+ with client.gpu_droplets.backups.with_streaming_response.list_policies() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = response.parse()
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_supported_policies(self, client: Gradient) -> None:
+ backup = client.gpu_droplets.backups.list_supported_policies()
+ assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_supported_policies(self, client: Gradient) -> None:
+ response = client.gpu_droplets.backups.with_raw_response.list_supported_policies()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = response.parse()
+ assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_supported_policies(self, client: Gradient) -> None:
+ with client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = response.parse()
+ assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_policy(self, client: Gradient) -> None:
+ backup = client.gpu_droplets.backups.retrieve_policy(
+ 3164444,
+ )
+ assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_policy(self, client: Gradient) -> None:
+ response = client.gpu_droplets.backups.with_raw_response.retrieve_policy(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = response.parse()
+ assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_policy(self, client: Gradient) -> None:
+ with client.gpu_droplets.backups.with_streaming_response.retrieve_policy(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = response.parse()
+ assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncBackups:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ backup = await async_client.gpu_droplets.backups.list(
+ droplet_id=3164444,
+ )
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ backup = await async_client.gpu_droplets.backups.list(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.backups.with_raw_response.list(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = await response.parse()
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.backups.with_streaming_response.list(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = await response.parse()
+ assert_matches_type(BackupListResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_policies(self, async_client: AsyncGradient) -> None:
+ backup = await async_client.gpu_droplets.backups.list_policies()
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_policies_with_all_params(self, async_client: AsyncGradient) -> None:
+ backup = await async_client.gpu_droplets.backups.list_policies(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_policies(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.backups.with_raw_response.list_policies()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = await response.parse()
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_policies(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.backups.with_streaming_response.list_policies() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = await response.parse()
+ assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_supported_policies(self, async_client: AsyncGradient) -> None:
+ backup = await async_client.gpu_droplets.backups.list_supported_policies()
+ assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_supported_policies(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.backups.with_raw_response.list_supported_policies()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = await response.parse()
+ assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_supported_policies(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = await response.parse()
+ assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_policy(self, async_client: AsyncGradient) -> None:
+ backup = await async_client.gpu_droplets.backups.retrieve_policy(
+ 3164444,
+ )
+ assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_policy(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.backups.with_raw_response.retrieve_policy(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ backup = await response.parse()
+ assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.backups.with_streaming_response.retrieve_policy(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ backup = await response.parse()
+ assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
new file mode 100644
index 00000000..bdaaeab9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
@@ -0,0 +1,431 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ DestroyWithAssociatedResourceListResponse,
+ DestroyWithAssociatedResourceCheckStatusResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestDestroyWithAssociatedResources:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.list(
+ 3164444,
+ )
+ assert_matches_type(
+ DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_check_status(self, client: Gradient) -> None:
+ destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.check_status(
+ 3164444,
+ )
+ assert_matches_type(
+ DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_check_status(self, client: Gradient) -> None:
+ response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_check_status(self, client: Gradient) -> None:
+ with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_dangerous(self, client: Gradient) -> None:
+ destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(
+ droplet_id=3164444,
+ x_dangerous=True,
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete_dangerous(self, client: Gradient) -> None:
+ response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous(
+ droplet_id=3164444,
+ x_dangerous=True,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = response.parse()
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete_dangerous(self, client: Gradient) -> None:
+ with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous(
+ droplet_id=3164444,
+ x_dangerous=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = response.parse()
+ assert destroy_with_associated_resource is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_selective(self, client: Gradient) -> None:
+ destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective(
+ droplet_id=3164444,
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_selective_with_all_params(self, client: Gradient) -> None:
+ destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective(
+ droplet_id=3164444,
+ floating_ips=["6186916"],
+ reserved_ips=["6186916"],
+ snapshots=["61486916"],
+ volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"],
+ volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"],
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete_selective(self, client: Gradient) -> None:
+ response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = response.parse()
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete_selective(self, client: Gradient) -> None:
+ with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = response.parse()
+ assert destroy_with_associated_resource is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retry(self, client: Gradient) -> None:
+ destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.retry(
+ 3164444,
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retry(self, client: Gradient) -> None:
+ response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = response.parse()
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retry(self, client: Gradient) -> None:
+ with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = response.parse()
+ assert destroy_with_associated_resource is None
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncDestroyWithAssociatedResources:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.list(
+ 3164444,
+ )
+ assert_matches_type(
+ DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = await response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = await response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_check_status(self, async_client: AsyncGradient) -> None:
+ destroy_with_associated_resource = (
+ await async_client.gpu_droplets.destroy_with_associated_resources.check_status(
+ 3164444,
+ )
+ )
+ assert_matches_type(
+ DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_check_status(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = await response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_check_status(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = await response.parse()
+ assert_matches_type(
+ DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
+ )
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> None:
+ destroy_with_associated_resource = (
+ await async_client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(
+ droplet_id=3164444,
+ x_dangerous=True,
+ )
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous(
+ droplet_id=3164444,
+ x_dangerous=True,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = await response.parse()
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous(
+ droplet_id=3164444,
+ x_dangerous=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = await response.parse()
+ assert destroy_with_associated_resource is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_selective(self, async_client: AsyncGradient) -> None:
+ destroy_with_associated_resource = (
+ await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective(
+ droplet_id=3164444,
+ )
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_selective_with_all_params(self, async_client: AsyncGradient) -> None:
+ destroy_with_associated_resource = (
+ await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective(
+ droplet_id=3164444,
+ floating_ips=["6186916"],
+ reserved_ips=["6186916"],
+ snapshots=["61486916"],
+ volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"],
+ volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"],
+ )
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete_selective(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = await response.parse()
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete_selective(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = await response.parse()
+ assert destroy_with_associated_resource is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retry(self, async_client: AsyncGradient) -> None:
+ destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.retry(
+ 3164444,
+ )
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retry(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ destroy_with_associated_resource = await response.parse()
+ assert destroy_with_associated_resource is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retry(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ destroy_with_associated_resource = await response.parse()
+ assert destroy_with_associated_resource is None
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py
new file mode 100644
index 00000000..60c7bbc9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_firewalls.py
@@ -0,0 +1,617 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ FirewallListResponse,
+ FirewallCreateResponse,
+ FirewallUpdateResponse,
+ FirewallRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestFirewalls:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.create()
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.create(
+ body={
+ "droplet_ids": [8043964],
+ "inbound_rules": [
+ {
+ "ports": "80",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ },
+ {
+ "ports": "22",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["gateway"],
+ },
+ },
+ ],
+ "name": "firewall",
+ "outbound_rules": [
+ {
+ "destinations": {
+ "addresses": ["0.0.0.0/0", "::/0"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "80",
+ "protocol": "tcp",
+ }
+ ],
+ "tags": ["base-image", "prod"],
+ },
+ )
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = response.parse()
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = response.parse()
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.retrieve(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.with_raw_response.retrieve(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = response.parse()
+ assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.with_streaming_response.retrieve(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = response.parse()
+ assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={"name": "frontend-firewall"},
+ )
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={
+ "droplet_ids": [8043964],
+ "inbound_rules": [
+ {
+ "ports": "8080",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ },
+ {
+ "ports": "22",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["gateway"],
+ },
+ },
+ ],
+ "name": "frontend-firewall",
+ "outbound_rules": [
+ {
+ "destinations": {
+ "addresses": ["0.0.0.0/0", "::/0"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "8080",
+ "protocol": "tcp",
+ }
+ ],
+ "tags": ["frontend"],
+ },
+ )
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.with_raw_response.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={"name": "frontend-firewall"},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = response.parse()
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.with_streaming_response.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={"name": "frontend-firewall"},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = response.parse()
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.with_raw_response.update(
+ firewall_id="",
+ firewall={"name": "frontend-firewall"},
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.list()
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = response.parse()
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = response.parse()
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ firewall = client.gpu_droplets.firewalls.delete(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert firewall is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.firewalls.with_raw_response.delete(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = response.parse()
+ assert firewall is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.firewalls.with_streaming_response.delete(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = response.parse()
+ assert firewall is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ client.gpu_droplets.firewalls.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncFirewalls:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.create()
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.create(
+ body={
+ "droplet_ids": [8043964],
+ "inbound_rules": [
+ {
+ "ports": "80",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ },
+ {
+ "ports": "22",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["gateway"],
+ },
+ },
+ ],
+ "name": "firewall",
+ "outbound_rules": [
+ {
+ "destinations": {
+ "addresses": ["0.0.0.0/0", "::/0"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "80",
+ "protocol": "tcp",
+ }
+ ],
+ "tags": ["base-image", "prod"],
+ },
+ )
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = await response.parse()
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = await response.parse()
+ assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.retrieve(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.with_raw_response.retrieve(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = await response.parse()
+ assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.with_streaming_response.retrieve(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = await response.parse()
+ assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={"name": "frontend-firewall"},
+ )
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={
+ "droplet_ids": [8043964],
+ "inbound_rules": [
+ {
+ "ports": "8080",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["1.2.3.4", "18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ },
+ {
+ "ports": "22",
+ "protocol": "tcp",
+ "sources": {
+ "addresses": ["18.0.0.0/8"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["gateway"],
+ },
+ },
+ ],
+ "name": "frontend-firewall",
+ "outbound_rules": [
+ {
+ "destinations": {
+ "addresses": ["0.0.0.0/0", "::/0"],
+ "droplet_ids": [8043964],
+ "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"],
+ "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"],
+ "tags": ["base-image", "prod"],
+ },
+ "ports": "8080",
+ "protocol": "tcp",
+ }
+ ],
+ "tags": ["frontend"],
+ },
+ )
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.with_raw_response.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={"name": "frontend-firewall"},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = await response.parse()
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.with_streaming_response.update(
+ firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
+ firewall={"name": "frontend-firewall"},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = await response.parse()
+ assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.with_raw_response.update(
+ firewall_id="",
+ firewall={"name": "frontend-firewall"},
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.list()
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = await response.parse()
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = await response.parse()
+ assert_matches_type(FirewallListResponse, firewall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ firewall = await async_client.gpu_droplets.firewalls.delete(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+ assert firewall is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.firewalls.with_raw_response.delete(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ firewall = await response.parse()
+ assert firewall is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.firewalls.with_streaming_response.delete(
+ "bb4b2611-3d72-467b-8602-280330ecd65c",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ firewall = await response.parse()
+ assert firewall is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
+ await async_client.gpu_droplets.firewalls.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py
new file mode 100644
index 00000000..84156532
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_floating_ips.py
@@ -0,0 +1,424 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ FloatingIPListResponse,
+ FloatingIPCreateResponse,
+ FloatingIPRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestFloatingIPs:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_1(self, client: Gradient) -> None:
+ floating_ip = client.gpu_droplets.floating_ips.create(
+ droplet_id=2457247,
+ )
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.with_raw_response.create(
+ droplet_id=2457247,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.with_streaming_response.create(
+ droplet_id=2457247,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ floating_ip = client.gpu_droplets.floating_ips.create(
+ region="nyc3",
+ )
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ floating_ip = client.gpu_droplets.floating_ips.create(
+ region="nyc3",
+ project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988",
+ )
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.with_raw_response.create(
+ region="nyc3",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.with_streaming_response.create(
+ region="nyc3",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ floating_ip = client.gpu_droplets.floating_ips.retrieve(
+ "45.55.96.47",
+ )
+ assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.with_raw_response.retrieve(
+ "45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.with_streaming_response.retrieve(
+ "45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ client.gpu_droplets.floating_ips.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ floating_ip = client.gpu_droplets.floating_ips.list()
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ floating_ip = client.gpu_droplets.floating_ips.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = response.parse()
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ floating_ip = client.gpu_droplets.floating_ips.delete(
+ "45.55.96.47",
+ )
+ assert floating_ip is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.floating_ips.with_raw_response.delete(
+ "45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = response.parse()
+ assert floating_ip is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.floating_ips.with_streaming_response.delete(
+ "45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = response.parse()
+ assert floating_ip is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ client.gpu_droplets.floating_ips.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncFloatingIPs:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
+ floating_ip = await async_client.gpu_droplets.floating_ips.create(
+ droplet_id=2457247,
+ )
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.with_raw_response.create(
+ droplet_id=2457247,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.with_streaming_response.create(
+ droplet_id=2457247,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ floating_ip = await async_client.gpu_droplets.floating_ips.create(
+ region="nyc3",
+ )
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ floating_ip = await async_client.gpu_droplets.floating_ips.create(
+ region="nyc3",
+ project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988",
+ )
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.with_raw_response.create(
+ region="nyc3",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.with_streaming_response.create(
+ region="nyc3",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ floating_ip = await async_client.gpu_droplets.floating_ips.retrieve(
+ "45.55.96.47",
+ )
+ assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve(
+ "45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.with_streaming_response.retrieve(
+ "45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ floating_ip = await async_client.gpu_droplets.floating_ips.list()
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ floating_ip = await async_client.gpu_droplets.floating_ips.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = await response.parse()
+ assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ floating_ip = await async_client.gpu_droplets.floating_ips.delete(
+ "45.55.96.47",
+ )
+ assert floating_ip is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.floating_ips.with_raw_response.delete(
+ "45.55.96.47",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ floating_ip = await response.parse()
+ assert floating_ip is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.floating_ips.with_streaming_response.delete(
+ "45.55.96.47",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ floating_ip = await response.parse()
+ assert floating_ip is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
+ await async_client.gpu_droplets.floating_ips.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py
new file mode 100644
index 00000000..1bebbe99
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_images.py
@@ -0,0 +1,417 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ ImageListResponse,
+ ImageCreateResponse,
+ ImageUpdateResponse,
+ ImageRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestImages:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.create()
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.create(
+ description="Cloud-optimized image w/ small footprint",
+ distribution="Ubuntu",
+ name="ubuntu-18.04-minimal",
+ region="nyc3",
+ tags=["base-image", "prod"],
+ url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img",
+ )
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = response.parse()
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = response.parse()
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.retrieve(
+ 0,
+ )
+ assert_matches_type(ImageRetrieveResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.with_raw_response.retrieve(
+ 0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = response.parse()
+ assert_matches_type(ImageRetrieveResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.with_streaming_response.retrieve(
+ 0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = response.parse()
+ assert_matches_type(ImageRetrieveResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.update(
+ image_id=62137902,
+ )
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.update(
+ image_id=62137902,
+ description=" ",
+ distribution="Ubuntu",
+ name="Nifty New Snapshot",
+ )
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.with_raw_response.update(
+ image_id=62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = response.parse()
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.with_streaming_response.update(
+ image_id=62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = response.parse()
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.list()
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.list(
+ page=1,
+ per_page=1,
+ private=True,
+ tag_name="tag_name",
+ type="application",
+ )
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = response.parse()
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = response.parse()
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ image = client.gpu_droplets.images.delete(
+ 62137902,
+ )
+ assert image is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.images.with_raw_response.delete(
+ 62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = response.parse()
+ assert image is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.images.with_streaming_response.delete(
+ 62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = response.parse()
+ assert image is None
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncImages:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.create()
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.create(
+ description="Cloud-optimized image w/ small footprint",
+ distribution="Ubuntu",
+ name="ubuntu-18.04-minimal",
+ region="nyc3",
+ tags=["base-image", "prod"],
+ url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img",
+ )
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = await response.parse()
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = await response.parse()
+ assert_matches_type(ImageCreateResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.retrieve(
+ 0,
+ )
+ assert_matches_type(ImageRetrieveResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.with_raw_response.retrieve(
+ 0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = await response.parse()
+ assert_matches_type(ImageRetrieveResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.with_streaming_response.retrieve(
+ 0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = await response.parse()
+ assert_matches_type(ImageRetrieveResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.update(
+ image_id=62137902,
+ )
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.update(
+ image_id=62137902,
+ description=" ",
+ distribution="Ubuntu",
+ name="Nifty New Snapshot",
+ )
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.with_raw_response.update(
+ image_id=62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = await response.parse()
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.with_streaming_response.update(
+ image_id=62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = await response.parse()
+ assert_matches_type(ImageUpdateResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.list()
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.list(
+ page=1,
+ per_page=1,
+ private=True,
+ tag_name="tag_name",
+ type="application",
+ )
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = await response.parse()
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = await response.parse()
+ assert_matches_type(ImageListResponse, image, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ image = await async_client.gpu_droplets.images.delete(
+ 62137902,
+ )
+ assert image is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.images.with_raw_response.delete(
+ 62137902,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ image = await response.parse()
+ assert image is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.images.with_streaming_response.delete(
+ 62137902,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ image = await response.parse()
+ assert image is None
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py
new file mode 100644
index 00000000..91138402
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_load_balancers.py
@@ -0,0 +1,1443 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ LoadBalancerListResponse,
+ LoadBalancerCreateResponse,
+ LoadBalancerUpdateResponse,
+ LoadBalancerRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestLoadBalancers:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_1(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ droplet_ids=[3164444, 3164445],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ tag="prod:web",
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.retrieve(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+ assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.retrieve(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.retrieve(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_overload_1(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params_overload_1(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ droplet_ids=[3164444, 3164445],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update_overload_1(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_overload_2(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params_overload_2(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ tag="prod:web",
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update_overload_2(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.list()
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.delete(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.delete(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.delete(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert load_balancer is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_cache(self, client: Gradient) -> None:
+ load_balancer = client.gpu_droplets.load_balancers.delete_cache(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete_cache(self, client: Gradient) -> None:
+ response = client.gpu_droplets.load_balancers.with_raw_response.delete_cache(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = response.parse()
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete_cache(self, client: Gradient) -> None:
+ with client.gpu_droplets.load_balancers.with_streaming_response.delete_cache(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = response.parse()
+ assert load_balancer is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete_cache(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ client.gpu_droplets.load_balancers.with_raw_response.delete_cache(
+ "",
+ )
+
+
+class TestAsyncLoadBalancers:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ droplet_ids=[3164444, 3164445],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ tag="prod:web",
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.create(
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.retrieve(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+ assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.retrieve(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_overload_1(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ droplet_ids=[3164444, 3164445],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update_overload_1(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_overload_2(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "tls_passthrough": False,
+ }
+ ],
+ algorithm="round_robin",
+ disable_lets_encrypt_dns_records=True,
+ domains=[
+ {
+ "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf",
+ "is_managed": True,
+ "name": "example.com",
+ }
+ ],
+ enable_backend_keepalive=True,
+ enable_proxy_protocol=True,
+ firewall={
+ "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"],
+ },
+ glb_settings={
+ "cdn": {"is_enabled": True},
+ "failover_threshold": 50,
+ "region_priorities": {
+ "nyc1": 1,
+ "fra1": 2,
+ "sgp1": 3,
+ },
+ "target_port": 80,
+ "target_protocol": "http",
+ },
+ health_check={
+ "check_interval_seconds": 10,
+ "healthy_threshold": 3,
+ "path": "/",
+ "port": 80,
+ "protocol": "http",
+ "response_timeout_seconds": 5,
+ "unhealthy_threshold": 5,
+ },
+ http_idle_timeout_seconds=90,
+ name="example-lb-01",
+ network="EXTERNAL",
+ network_stack="IPV4",
+ project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ redirect_http_to_https=True,
+ region="nyc3",
+ size="lb-small",
+ size_unit=3,
+ sticky_sessions={
+ "cookie_name": "DO-LB",
+ "cookie_ttl_seconds": 300,
+ "type": "cookies",
+ },
+ tag="prod:web",
+ target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"],
+ tls_cipher_policy="STRONG",
+ type="REGIONAL",
+ vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b",
+ )
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.update(
+ lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update_overload_2(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.with_raw_response.update(
+ lb_id="",
+ forwarding_rules=[
+ {
+ "entry_port": 443,
+ "entry_protocol": "https",
+ "target_port": 80,
+ "target_protocol": "http",
+ }
+ ],
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.list()
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.delete(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert load_balancer is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_cache(self, async_client: AsyncGradient) -> None:
+ load_balancer = await async_client.gpu_droplets.load_balancers.delete_cache(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete_cache(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ load_balancer = await response.parse()
+ assert load_balancer is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete_cache(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete_cache(
+ "4de7ac8b-495b-4884-9a69-1050c6793cd6",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ load_balancer = await response.parse()
+ assert load_balancer is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete_cache(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
+ await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache(
+ "",
+ )
diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py
new file mode 100644
index 00000000..308694ac
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_sizes.py
@@ -0,0 +1,98 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import SizeListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestSizes:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ size = client.gpu_droplets.sizes.list()
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ size = client.gpu_droplets.sizes.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.sizes.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ size = response.parse()
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.sizes.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ size = response.parse()
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncSizes:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ size = await async_client.gpu_droplets.sizes.list()
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ size = await async_client.gpu_droplets.sizes.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.sizes.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ size = await response.parse()
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.sizes.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ size = await response.parse()
+ assert_matches_type(SizeListResponse, size, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py
new file mode 100644
index 00000000..ca4d146f
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_snapshots.py
@@ -0,0 +1,236 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestSnapshots:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.snapshots.retrieve(
+ 6372321,
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.snapshots.with_raw_response.retrieve(
+ 6372321,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.snapshots.with_streaming_response.retrieve(
+ 6372321,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.snapshots.list()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.snapshots.list(
+ page=1,
+ per_page=1,
+ resource_type="droplet",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.snapshots.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.snapshots.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.snapshots.delete(
+ 6372321,
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.snapshots.with_raw_response.delete(
+ 6372321,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.snapshots.with_streaming_response.delete(
+ 6372321,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncSnapshots:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.snapshots.retrieve(
+ 6372321,
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.snapshots.with_raw_response.retrieve(
+ 6372321,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.snapshots.with_streaming_response.retrieve(
+ 6372321,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.snapshots.list()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.snapshots.list(
+ page=1,
+ per_page=1,
+ resource_type="droplet",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.snapshots.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.snapshots.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.snapshots.delete(
+ 6372321,
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.snapshots.with_raw_response.delete(
+ 6372321,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.snapshots.with_streaming_response.delete(
+ 6372321,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py
new file mode 100644
index 00000000..c0d83f63
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_volumes.py
@@ -0,0 +1,568 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets import (
+ VolumeListResponse,
+ VolumeCreateResponse,
+ VolumeRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestVolumes:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_1(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ description="Block store for examples",
+ filesystem_label="example",
+ filesystem_type="ext4",
+ snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.with_raw_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.with_streaming_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ description="Block store for examples",
+ filesystem_label="example",
+ filesystem_type="ext4",
+ snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.with_raw_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.with_streaming_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.retrieve(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.with_raw_response.retrieve(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = response.parse()
+ assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.with_streaming_response.retrieve(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = response.parse()
+ assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.list()
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.list(
+ name="name",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = response.parse()
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = response.parse()
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.delete(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.with_raw_response.delete(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = response.parse()
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.with_streaming_response.delete(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = response.parse()
+ assert volume is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_by_name(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.delete_by_name()
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_by_name_with_all_params(self, client: Gradient) -> None:
+ volume = client.gpu_droplets.volumes.delete_by_name(
+ name="name",
+ region="nyc3",
+ )
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete_by_name(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.with_raw_response.delete_by_name()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = response.parse()
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete_by_name(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = response.parse()
+ assert volume is None
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncVolumes:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ description="Block store for examples",
+ filesystem_label="example",
+ filesystem_type="ext4",
+ snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.with_raw_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = await response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.with_streaming_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = await response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ description="Block store for examples",
+ filesystem_label="example",
+ filesystem_type="ext4",
+ snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.with_raw_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = await response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.with_streaming_response.create(
+ name="example",
+ region="nyc3",
+ size_gigabytes=10,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = await response.parse()
+ assert_matches_type(VolumeCreateResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.retrieve(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.with_raw_response.retrieve(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = await response.parse()
+ assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.with_streaming_response.retrieve(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = await response.parse()
+ assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.list()
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.list(
+ name="name",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = await response.parse()
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = await response.parse()
+ assert_matches_type(VolumeListResponse, volume, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.delete(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.with_raw_response.delete(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = await response.parse()
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.with_streaming_response.delete(
+ "7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = await response.parse()
+ assert volume is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_by_name(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.delete_by_name()
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGradient) -> None:
+ volume = await async_client.gpu_droplets.volumes.delete_by_name(
+ name="name",
+ region="nyc3",
+ )
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete_by_name(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.with_raw_response.delete_by_name()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ volume = await response.parse()
+ assert volume is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete_by_name(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ volume = await response.parse()
+ assert volume is None
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/volumes/__init__.py b/tests/api_resources/gpu_droplets/volumes/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/volumes/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py
new file mode 100644
index 00000000..f1bb3a21
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py
@@ -0,0 +1,825 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets.volumes import (
+ ActionListResponse,
+ ActionRetrieveResponse,
+ ActionInitiateByIDResponse,
+ ActionInitiateByNameResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestActions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ volume_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.actions.with_raw_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.actions.with_streaming_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.actions.with_raw_response.list(
+ volume_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_id_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_id_with_all_params_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_by_id_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_by_id_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_by_id_overload_1(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_id_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_id_with_all_params_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_by_id_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_by_id_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_by_id_overload_2(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_id_overload_3(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_id_with_all_params_overload_3(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_by_id_overload_3(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_by_id_overload_3(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_by_id_overload_3(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="",
+ size_gigabytes=16384,
+ type="attach",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_name_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_name_with_all_params_overload_1(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_by_name_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_by_name_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_name_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_by_name_with_all_params_overload_2(self, client: Gradient) -> None:
+ action = client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_by_name_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_by_name_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncActions:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.actions.with_streaming_response.retrieve(
+ action_id=36804636,
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
+ action_id=36804636,
+ volume_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.actions.with_raw_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.actions.with_streaming_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionListResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.actions.with_raw_response.list(
+ volume_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_id_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_id_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="",
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_id_with_all_params_overload_3(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ size_gigabytes=16384,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByIDResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
+ volume_id="",
+ size_gigabytes=16384,
+ type="attach",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_name_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_by_name_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ page=1,
+ per_page=1,
+ region="nyc3",
+ )
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
+ droplet_id=11612190,
+ type="attach",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ action = await response.parse()
+ assert_matches_type(ActionInitiateByNameResponse, action, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
new file mode 100644
index 00000000..ae47fc90
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
@@ -0,0 +1,412 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.gpu_droplets.volumes import (
+ SnapshotListResponse,
+ SnapshotCreateResponse,
+ SnapshotRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestSnapshots:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.volumes.snapshots.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ )
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.volumes.snapshots.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.snapshots.with_raw_response.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.snapshots.with_streaming_response.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_create(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.snapshots.with_raw_response.create(
+ volume_id="",
+ name="big-data-snapshot1475261774",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.volumes.snapshots.retrieve(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
+ client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.volumes.snapshots.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.volumes.snapshots.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.snapshots.with_raw_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.snapshots.with_streaming_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ client.gpu_droplets.volumes.snapshots.with_raw_response.list(
+ volume_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ snapshot = client.gpu_droplets.volumes.snapshots.delete(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.volumes.snapshots.with_streaming_response.delete(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
+ client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncSnapshots:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.volumes.snapshots.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ )
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.volumes.snapshots.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ tags=["base-image", "prod"],
+ )
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.create(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ name="big-data-snapshot1475261774",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create(
+ volume_id="",
+ name="big-data-snapshot1475261774",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.volumes.snapshots.retrieve(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
+ await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.volumes.snapshots.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.volumes.snapshots.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.list(
+ volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
+ await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list(
+ volume_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.gpu_droplets.volumes.snapshots.delete(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.delete(
+ "fbe805e8-866b-11e6-96bf-000f53315a41",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
+ await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/inference/__init__.py b/tests/api_resources/inference/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/inference/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py
new file mode 100644
index 00000000..99a9e553
--- /dev/null
+++ b/tests/api_resources/inference/test_api_keys.py
@@ -0,0 +1,448 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.inference import (
+ APIKeyListResponse,
+ APIKeyCreateResponse,
+ APIKeyDeleteResponse,
+ APIKeyUpdateResponse,
+ APIKeyUpdateRegenerateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAPIKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ api_key = client.inference.api_keys.create()
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ api_key = client.inference.api_keys.create(
+ name="Production Key",
+ )
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.inference.api_keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.inference.api_keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ api_key = client.inference.api_keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ api_key = client.inference.api_keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.inference.api_keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.inference.api_keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.inference.api_keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ api_key = client.inference.api_keys.list()
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ api_key = client.inference.api_keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.inference.api_keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.inference.api_keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ api_key = client.inference.api_keys.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.inference.api_keys.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ api_key = response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.inference.api_keys.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ api_key = response.parse()
+ assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.inference.api_keys.with_raw_response.delete(
+ "",
+ )
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_update_regenerate(self, client: Gradient) -> None:  # update_regenerate() with a sample UUID yields the regenerate-response model.
+        api_key = client.inference.api_keys.update_regenerate(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_raw_response_update_regenerate(self, client: Gradient) -> None:  # Raw-response variant of update_regenerate().
+        response = client.inference.api_keys.with_raw_response.update_regenerate(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = response.parse()
+        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_streaming_response_update_regenerate(self, client: Gradient) -> None:  # Streaming variant of update_regenerate().
+        with client.inference.api_keys.with_streaming_response.update_regenerate(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = response.parse()
+            assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # Closed once the context manager exits.
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_path_params_update_regenerate(self, client: Gradient) -> None:  # Empty `api_key_uuid` must raise ValueError.
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+            client.inference.api_keys.with_raw_response.update_regenerate(
+                "",
+            )
+
+
+class TestAsyncAPIKeys:  # Async mirror of the sync API-key suite; same calls awaited on AsyncGradient.
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )  # Indirect fixture params; ids name the client variants (see conftest).
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_create(self, async_client: AsyncGradient) -> None:  # create() with no arguments.
+        api_key = await async_client.inference.api_keys.create()
+        assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:  # create() with every optional param.
+        api_key = await async_client.inference.api_keys.create(
+            name="Production Key",
+        )
+        assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncGradient) -> None:  # Raw-response wrapper; parse() is awaited.
+        response = await async_client.inference.api_keys.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = await response.parse()
+        assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:  # Streaming wrapper via async context manager.
+        async with async_client.inference.api_keys.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = await response.parse()
+            assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # Closed once the context manager exits.
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_update(self, async_client: AsyncGradient) -> None:  # update() with only the required path param.
+        api_key = await async_client.inference.api_keys.update(
+            path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:  # update() with body params as well.
+        api_key = await async_client.inference.api_keys.update(
+            path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+            name='"Production Key"',
+        )
+        assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_raw_response_update(self, async_client: AsyncGradient) -> None:  # Raw-response variant of update().
+        response = await async_client.inference.api_keys.with_raw_response.update(
+            path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = await response.parse()
+        assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:  # Streaming variant of update().
+        async with async_client.inference.api_keys.with_streaming_response.update(
+            path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = await response.parse()
+            assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_path_params_update(self, async_client: AsyncGradient) -> None:  # Empty path param must raise ValueError.
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+            await async_client.inference.api_keys.with_raw_response.update(
+                path_api_key_uuid="",
+            )
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradient) -> None:  # list() with defaults.
+        api_key = await async_client.inference.api_keys.list()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:  # list() with pagination params.
+        api_key = await async_client.inference.api_keys.list(
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:  # Raw-response variant of list().
+        response = await async_client.inference.api_keys.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = await response.parse()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:  # Streaming variant of list().
+        async with async_client.inference.api_keys.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = await response.parse()
+            assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:  # delete() with a sample UUID.
+        api_key = await async_client.inference.api_keys.delete(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:  # Raw-response variant of delete().
+        response = await async_client.inference.api_keys.with_raw_response.delete(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = await response.parse()
+        assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:  # Streaming variant of delete().
+        async with async_client.inference.api_keys.with_streaming_response.delete(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = await response.parse()
+            assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncGradient) -> None:  # Empty positional UUID must raise ValueError.
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+            await async_client.inference.api_keys.with_raw_response.delete(
+                "",
+            )
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_method_update_regenerate(self, async_client: AsyncGradient) -> None:  # update_regenerate() with a sample UUID.
+        api_key = await async_client.inference.api_keys.update_regenerate(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_raw_response_update_regenerate(self, async_client: AsyncGradient) -> None:  # Raw-response variant of update_regenerate().
+        response = await async_client.inference.api_keys.with_raw_response.update_regenerate(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = await response.parse()
+        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_streaming_response_update_regenerate(self, async_client: AsyncGradient) -> None:  # Streaming variant of update_regenerate().
+        async with async_client.inference.api_keys.with_streaming_response.update_regenerate(
+            '"123e4567-e89b-12d3-a456-426614174000"',
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = await response.parse()
+            assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    async def test_path_params_update_regenerate(self, async_client: AsyncGradient) -> None:  # Empty UUID must raise ValueError.
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+            await async_client.inference.api_keys.with_raw_response.update_regenerate(
+                "",
+            )
diff --git a/tests/api_resources/knowledge_bases/__init__.py b/tests/api_resources/knowledge_bases/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/knowledge_bases/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
new file mode 100644
index 00000000..e8f430b4
--- /dev/null
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -0,0 +1,622 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.knowledge_bases import (
+ DataSourceListResponse,
+ DataSourceCreateResponse,
+ DataSourceDeleteResponse,
+ DataSourceUpdateResponse,
+ DataSourceCreatePresignedURLsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestDataSources:  # Sync tests for knowledge_bases.data_sources: create/update/list/delete + presigned URLs.
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])  # Client variants (see conftest).
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_create(self, client: Gradient) -> None:  # create() with only the required path param.
+        data_source = client.knowledge_bases.data_sources.create(
+            path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_create_with_all_params(self, client: Gradient) -> None:  # Exercises every optional data-source payload shape.
+        data_source = client.knowledge_bases.data_sources.create(
+            path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            aws_data_source={
+                "bucket_name": "example name",
+                "item_path": "example string",
+                "key_id": "123e4567-e89b-12d3-a456-426614174000",
+                "region": "example string",
+                "secret_key": "example string",
+            },
+            chunking_algorithm="CHUNKING_ALGORITHM_SECTION_BASED",
+            chunking_options={
+                "child_chunk_size": 350,
+                "max_chunk_size": 750,
+                "parent_chunk_size": 1000,
+                "semantic_threshold": 0.5,
+            },
+            body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"',
+            spaces_data_source={
+                "bucket_name": "example name",
+                "item_path": "example string",
+                "region": "example string",
+            },
+            web_crawler_data_source={
+                "base_url": "example string",
+                "crawling_option": "UNKNOWN",
+                "embed_media": True,
+                "exclude_tags": ["example string"],
+            },
+        )
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_raw_response_create(self, client: Gradient) -> None:  # with_raw_response exposes the HTTP layer; parse() yields the model.
+        response = client.knowledge_bases.data_sources.with_raw_response.create(
+            path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = response.parse()
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_streaming_response_create(self, client: Gradient) -> None:  # Streaming variant: body stays open inside the context manager.
+        with client.knowledge_bases.data_sources.with_streaming_response.create(
+            path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = response.parse()
+            assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True  # Closed once the context manager exits.
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_path_params_create(self, client: Gradient) -> None:  # Empty knowledge-base UUID must raise ValueError.
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
+        ):
+            client.knowledge_bases.data_sources.with_raw_response.create(
+                path_knowledge_base_uuid="",
+            )
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_update(self, client: Gradient) -> None:  # update() with both required path params.
+        data_source = client.knowledge_bases.data_sources.update(
+            path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+            path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+        )
+        assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_update_with_all_params(self, client: Gradient) -> None:  # update() with chunking options and body UUIDs too.
+        data_source = client.knowledge_bases.data_sources.update(
+            path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+            path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+            chunking_algorithm="CHUNKING_ALGORITHM_SECTION_BASED",
+            chunking_options={
+                "child_chunk_size": 350,
+                "max_chunk_size": 750,
+                "parent_chunk_size": 1000,
+                "semantic_threshold": 0.5,
+            },
+            body_data_source_uuid="98765432-1234-1234-1234-123456789012",
+            body_knowledge_base_uuid="12345678-1234-1234-1234-123456789012",
+        )
+        assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_raw_response_update(self, client: Gradient) -> None:  # Raw-response variant of update().
+        response = client.knowledge_bases.data_sources.with_raw_response.update(
+            path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+            path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = response.parse()
+        assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_streaming_response_update(self, client: Gradient) -> None:  # Streaming variant of update().
+        with client.knowledge_bases.data_sources.with_streaming_response.update(
+            path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+            path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = response.parse()
+            assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_path_params_update(self, client: Gradient) -> None:  # Each empty path param must raise its own ValueError.
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
+        ):
+            client.knowledge_bases.data_sources.with_raw_response.update(
+                path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+                path_knowledge_base_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_data_source_uuid` but received ''"):
+            client.knowledge_bases.data_sources.with_raw_response.update(
+                path_data_source_uuid="",
+                path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+            )
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_list(self, client: Gradient) -> None:  # list() with only the required knowledge-base UUID.
+        data_source = client.knowledge_bases.data_sources.list(
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_list_with_all_params(self, client: Gradient) -> None:  # list() with pagination params.
+        data_source = client.knowledge_bases.data_sources.list(
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_raw_response_list(self, client: Gradient) -> None:  # Raw-response variant of list().
+        response = client.knowledge_bases.data_sources.with_raw_response.list(
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = response.parse()
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_streaming_response_list(self, client: Gradient) -> None:  # Streaming variant of list().
+        with client.knowledge_bases.data_sources.with_streaming_response.list(
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = response.parse()
+            assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_path_params_list(self, client: Gradient) -> None:  # Empty knowledge-base UUID must raise ValueError.
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            client.knowledge_bases.data_sources.with_raw_response.list(
+                knowledge_base_uuid="",
+            )
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_delete(self, client: Gradient) -> None:  # delete() needs both the data-source and knowledge-base UUIDs.
+        data_source = client.knowledge_bases.data_sources.delete(
+            data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_raw_response_delete(self, client: Gradient) -> None:  # Raw-response variant of delete().
+        response = client.knowledge_bases.data_sources.with_raw_response.delete(
+            data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = response.parse()
+        assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_streaming_response_delete(self, client: Gradient) -> None:  # Streaming variant of delete().
+        with client.knowledge_bases.data_sources.with_streaming_response.delete(
+            data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = response.parse()
+            assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_path_params_delete(self, client: Gradient) -> None:  # Each empty path param must raise its own ValueError.
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            client.knowledge_bases.data_sources.with_raw_response.delete(
+                data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+                knowledge_base_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"):
+            client.knowledge_bases.data_sources.with_raw_response.delete(
+                data_source_uuid="",
+                knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            )
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_create_presigned_urls(self, client: Gradient) -> None:  # create_presigned_urls() with no arguments.
+        data_source = client.knowledge_bases.data_sources.create_presigned_urls()
+        assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_method_create_presigned_urls_with_all_params(self, client: Gradient) -> None:  # create_presigned_urls() with a files payload.
+        data_source = client.knowledge_bases.data_sources.create_presigned_urls(
+            files=[
+                {
+                    "file_name": "example name",
+                    "file_size": "file_size",
+                }
+            ],
+        )
+        assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_raw_response_create_presigned_urls(self, client: Gradient) -> None:  # Raw-response variant of create_presigned_urls().
+        response = client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = response.parse()
+        assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+    @pytest.mark.skip(reason="Mock server tests are disabled")
+    @parametrize
+    def test_streaming_response_create_presigned_urls(self, client: Gradient) -> None:  # Streaming variant of create_presigned_urls().
+        with client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = response.parse()
+            assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncDataSources:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.create(
+ path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.create(
+ path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ aws_data_source={
+ "bucket_name": "example name",
+ "item_path": "example string",
+ "key_id": "123e4567-e89b-12d3-a456-426614174000",
+ "region": "example string",
+ "secret_key": "example string",
+ },
+ chunking_algorithm="CHUNKING_ALGORITHM_SECTION_BASED",
+ chunking_options={
+ "child_chunk_size": 350,
+ "max_chunk_size": 750,
+ "parent_chunk_size": 1000,
+ "semantic_threshold": 0.5,
+ },
+ body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"',
+ spaces_data_source={
+ "bucket_name": "example name",
+ "item_path": "example string",
+ "region": "example string",
+ },
+ web_crawler_data_source={
+ "base_url": "example string",
+ "crawling_option": "UNKNOWN",
+ "embed_media": True,
+ "exclude_tags": ["example string"],
+ },
+ )
+ assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.data_sources.with_raw_response.create(
+ path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ data_source = await response.parse()
+ assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.data_sources.with_streaming_response.create(
+ path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ data_source = await response.parse()
+ assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
+ ):
+ await async_client.knowledge_bases.data_sources.with_raw_response.create(
+ path_knowledge_base_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.update(
+ path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+ path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.update(
+ path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+ path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ chunking_algorithm="CHUNKING_ALGORITHM_SECTION_BASED",
+ chunking_options={
+ "child_chunk_size": 350,
+ "max_chunk_size": 750,
+ "parent_chunk_size": 1000,
+ "semantic_threshold": 0.5,
+ },
+ body_data_source_uuid="98765432-1234-1234-1234-123456789012",
+ body_knowledge_base_uuid="12345678-1234-1234-1234-123456789012",
+ )
+ assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.data_sources.with_raw_response.update(
+ path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+ path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ data_source = await response.parse()
+ assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.data_sources.with_streaming_response.update(
+ path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+ path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ data_source = await response.parse()
+ assert_matches_type(DataSourceUpdateResponse, data_source, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
+ ):
+ await async_client.knowledge_bases.data_sources.with_raw_response.update(
+ path_data_source_uuid="123e4567-e89b-12d3-a456-426614174000",
+ path_knowledge_base_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_data_source_uuid` but received ''"):
+ await async_client.knowledge_bases.data_sources.with_raw_response.update(
+ path_data_source_uuid="",
+ path_knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.list(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.list(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.data_sources.with_raw_response.list(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ data_source = await response.parse()
+ assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.data_sources.with_streaming_response.list(
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ data_source = await response.parse()
+ assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.knowledge_bases.data_sources.with_raw_response.list(
+ knowledge_base_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.delete(
+ data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.data_sources.with_raw_response.delete(
+ data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ data_source = await response.parse()
+ assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.data_sources.with_streaming_response.delete(
+ data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ data_source = await response.parse()
+ assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.knowledge_bases.data_sources.with_raw_response.delete(
+ data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ knowledge_base_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"):
+ await async_client.knowledge_bases.data_sources.with_raw_response.delete(
+ data_source_uuid="",
+ knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_presigned_urls(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_presigned_urls_with_all_params(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls(
+ files=[
+ {
+ "file_name": "example name",
+ "file_size": "file_size",
+ }
+ ],
+ )
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_presigned_urls(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ data_source = await response.parse()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_presigned_urls(self, async_client: AsyncGradient) -> None:
+ async with (
+ async_client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls()
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ data_source = await response.parse()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
new file mode 100644
index 00000000..516250be
--- /dev/null
+++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
@@ -0,0 +1,778 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import httpx
+import pytest
+
+from gradient import Gradient, AsyncGradient, IndexingJobError, IndexingJobTimeoutError
+from tests.utils import assert_matches_type
+from gradient.types.knowledge_bases import (
+ IndexingJobListResponse,
+ IndexingJobCreateResponse,
+ IndexingJobRetrieveResponse,
+ IndexingJobUpdateCancelResponse,
+ IndexingJobRetrieveSignedURLResponse,
+ IndexingJobRetrieveDataSourcesResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestIndexingJobs:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.create()
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.create(
+ data_source_uuids=["example string"],
+ knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.list()
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_data_sources(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_data_sources(self, client: Gradient) -> None:
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_data_sources(self, client: Gradient) -> None:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve_data_sources(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+ client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_signed_url(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_signed_url(self, client: Gradient) -> None:
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_signed_url(self, client: Gradient) -> None:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve_signed_url(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+ client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_cancel(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_cancel_with_all_params(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update_cancel(self, client: Gradient) -> None:
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update_cancel(self, client: Gradient) -> None:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update_cancel(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+ path_uuid="",
+ )
+
+ @parametrize
+ def test_wait_for_completion_raises_indexing_job_error_on_failed(self, client: Gradient, respx_mock: Any) -> None:
+ """Test that IndexingJobError is raised when job phase is FAILED"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_FAILED",
+ "total_items_indexed": "10",
+ "total_items_failed": "5",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobError) as exc_info:
+ client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_FAILED"
+ assert "failed" in str(exc_info.value).lower()
+
+ @parametrize
+ def test_wait_for_completion_raises_indexing_job_error_on_error(self, client: Gradient, respx_mock: Any) -> None:
+ """Test that IndexingJobError is raised when job phase is ERROR"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_ERROR",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobError) as exc_info:
+ client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_ERROR"
+ assert "error" in str(exc_info.value).lower()
+
+ @parametrize
+ def test_wait_for_completion_raises_indexing_job_error_on_cancelled(
+ self, client: Gradient, respx_mock: Any
+ ) -> None:
+ """Test that IndexingJobError is raised when job phase is CANCELLED"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_CANCELLED",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobError) as exc_info:
+ client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_CANCELLED"
+ assert "cancelled" in str(exc_info.value).lower()
+
+ @parametrize
+ def test_wait_for_completion_raises_timeout_error(self, client: Gradient, respx_mock: Any) -> None:
+ """Test that IndexingJobTimeoutError is raised on timeout"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_RUNNING",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobTimeoutError) as exc_info:
+ client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid, poll_interval=0.1, timeout=0.2)
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_RUNNING"
+ assert exc_info.value.timeout == 0.2
+
+ @parametrize
+ def test_wait_for_completion_succeeds(self, client: Gradient, respx_mock: Any) -> None:
+ """Test that wait_for_completion returns successfully when job succeeds"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_SUCCEEDED",
+ "total_items_indexed": "100",
+ "total_items_failed": "0",
+ }
+ },
+ )
+ )
+
+ result = client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+ assert_matches_type(IndexingJobRetrieveResponse, result, path=["response"])
+ assert result.job is not None
+ assert result.job.phase == "BATCH_JOB_PHASE_SUCCEEDED"
+
+
+class TestAsyncIndexingJobs:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.create()
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.create(
+ data_source_uuids=["example string"],
+ knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.list()
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_data_sources(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_cancel(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update_cancel(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update_cancel(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update_cancel(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+ path_uuid="",
+ )
+
+ @parametrize
+ async def test_wait_for_completion_raises_indexing_job_error_on_failed(
+ self, async_client: AsyncGradient, respx_mock: Any
+ ) -> None:
+ """Test that IndexingJobError is raised when job phase is FAILED"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_FAILED",
+ "total_items_indexed": "10",
+ "total_items_failed": "5",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobError) as exc_info:
+ await async_client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_FAILED"
+ assert "failed" in str(exc_info.value).lower()
+
+ @parametrize
+ async def test_wait_for_completion_raises_indexing_job_error_on_error(
+ self, async_client: AsyncGradient, respx_mock: Any
+ ) -> None:
+ """Test that IndexingJobError is raised when job phase is ERROR"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_ERROR",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobError) as exc_info:
+ await async_client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_ERROR"
+ assert "error" in str(exc_info.value).lower()
+
+ @parametrize
+ async def test_wait_for_completion_raises_indexing_job_error_on_cancelled(
+ self, async_client: AsyncGradient, respx_mock: Any
+ ) -> None:
+ """Test that IndexingJobError is raised when job phase is CANCELLED"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_CANCELLED",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobError) as exc_info:
+ await async_client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_CANCELLED"
+ assert "cancelled" in str(exc_info.value).lower()
+
+ @parametrize
+ async def test_wait_for_completion_raises_timeout_error(self, async_client: AsyncGradient, respx_mock: Any) -> None:
+ """Test that IndexingJobTimeoutError is raised on timeout"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_RUNNING",
+ }
+ },
+ )
+ )
+
+ with pytest.raises(IndexingJobTimeoutError) as exc_info:
+ await async_client.knowledge_bases.indexing_jobs.wait_for_completion(
+ job_uuid, poll_interval=0.1, timeout=0.2
+ )
+
+ assert exc_info.value.uuid == job_uuid
+ assert exc_info.value.phase == "BATCH_JOB_PHASE_RUNNING"
+ assert exc_info.value.timeout == 0.2
+
+ @parametrize
+ async def test_wait_for_completion_succeeds(self, async_client: AsyncGradient, respx_mock: Any) -> None:
+ """Test that wait_for_completion returns successfully when job succeeds"""
+ job_uuid = "test-job-uuid"
+ respx_mock.get(f"{base_url}/v2/gen-ai/indexing_jobs/{job_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "job": {
+ "uuid": job_uuid,
+ "phase": "BATCH_JOB_PHASE_SUCCEEDED",
+ "total_items_indexed": "100",
+ "total_items_failed": "0",
+ }
+ },
+ )
+ )
+
+ result = await async_client.knowledge_bases.indexing_jobs.wait_for_completion(job_uuid)
+ assert_matches_type(IndexingJobRetrieveResponse, result, path=["response"])
+ assert result.job is not None
+ assert result.job.phase == "BATCH_JOB_PHASE_SUCCEEDED"
diff --git a/tests/api_resources/models/__init__.py b/tests/api_resources/models/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/models/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/models/providers/__init__.py b/tests/api_resources/models/providers/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/models/providers/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py
new file mode 100644
index 00000000..d0e8209c
--- /dev/null
+++ b/tests/api_resources/models/providers/test_anthropic.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.models.providers import (
+ AnthropicListResponse,
+ AnthropicCreateResponse,
+ AnthropicDeleteResponse,
+ AnthropicUpdateResponse,
+ AnthropicRetrieveResponse,
+ AnthropicListAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAnthropic:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.create()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.models.providers.anthropic.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.models.providers.anthropic.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.models.providers.anthropic.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.models.providers.anthropic.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.models.providers.anthropic.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.list()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.models.providers.anthropic.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.models.providers.anthropic.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.models.providers.anthropic.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.models.providers.anthropic.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_agents(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_agents_with_all_params(self, client: Gradient) -> None:
+ anthropic = client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_agents(self, client: Gradient) -> None:
+ response = client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_agents(self, client: Gradient) -> None:
+ with client.models.providers.anthropic.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_agents(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid="",
+ )
+
+
+class TestAsyncAnthropic:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.create()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.list()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_agents(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None:
+ anthropic = await async_client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py
new file mode 100644
index 00000000..14ef2c24
--- /dev/null
+++ b/tests/api_resources/models/providers/test_openai.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.models.providers import (
+ OpenAIListResponse,
+ OpenAICreateResponse,
+ OpenAIDeleteResponse,
+ OpenAIUpdateResponse,
+ OpenAIRetrieveResponse,
+ OpenAIRetrieveAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestOpenAI:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.create()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.models.providers.openai.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.models.providers.openai.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.models.providers.openai.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.models.providers.openai.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            api_key='"sk-proj--123456789098765432123456789"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.models.providers.openai.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.list()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.models.providers.openai.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.models.providers.openai.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.models.providers.openai.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.models.providers.openai.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_agents(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_agents_with_all_params(self, client: Gradient) -> None:
+ openai = client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_agents(self, client: Gradient) -> None:
+ response = client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_agents(self, client: Gradient) -> None:
+ with client.models.providers.openai.with_streaming_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve_agents(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid="",
+ )
+
+
+class TestAsyncOpenAI:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.create()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            api_key='"sk-proj--123456789098765432123456789"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.list()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_agents(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradient) -> None:
+ openai = await async_client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradient) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradient) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve_agents(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/nfs/__init__.py b/tests/api_resources/nfs/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/nfs/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/nfs/test_snapshots.py b/tests/api_resources/nfs/test_snapshots.py
new file mode 100644
index 00000000..5068f951
--- /dev/null
+++ b/tests/api_resources/nfs/test_snapshots.py
@@ -0,0 +1,305 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.nfs import (
+ SnapshotListResponse,
+ SnapshotRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestSnapshots:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.nfs.snapshots.with_streaming_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.list()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.list(
+ region="region",
+ share_id="share_id",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.nfs.snapshots.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.nfs.snapshots.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_with_all_params(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.nfs.snapshots.with_streaming_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="",
+ )
+
+
+class TestAsyncSnapshots:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.snapshots.with_streaming_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ await async_client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.list()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.list(
+ region="region",
+ share_id="share_id",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.snapshots.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.snapshots.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_with_all_params(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.snapshots.with_streaming_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ await async_client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="",
+ )
diff --git a/tests/api_resources/organization/projects/test_api_keys.py b/tests/api_resources/organization/projects/test_api_keys.py
deleted file mode 100644
index d8c6bbc0..00000000
--- a/tests/api_resources/organization/projects/test_api_keys.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- APIKey,
- APIKeyListResponse,
- APIKeyDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAPIKeys:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.api_keys.with_streaming_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.list(
- project_id="project_id",
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.api_keys.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.api_keys.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- api_key = client.organization.projects.api_keys.delete(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.api_keys.with_streaming_response.delete(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.projects.api_keys.with_raw_response.delete(
- key_id="",
- project_id="project_id",
- )
-
-
-class TestAsyncAPIKeys:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = await response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.api_keys.with_streaming_response.retrieve(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = await response.parse()
- assert_matches_type(APIKey, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.retrieve(
- key_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.list(
- project_id="project_id",
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.api_keys.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = await response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.api_keys.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = await response.parse()
- assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- api_key = await async_client.organization.projects.api_keys.delete(
- key_id="key_id",
- project_id="project_id",
- )
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- api_key = await response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.api_keys.with_streaming_response.delete(
- key_id="key_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- api_key = await response.parse()
- assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.delete(
- key_id="key_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.projects.api_keys.with_raw_response.delete(
- key_id="",
- project_id="project_id",
- )
diff --git a/tests/api_resources/organization/projects/test_rate_limits.py b/tests/api_resources/organization/projects/test_rate_limits.py
deleted file mode 100644
index 3f7688b4..00000000
--- a/tests/api_resources/organization/projects/test_rate_limits.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- RateLimit,
- RateLimitListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRateLimits:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- batch_1_day_max_input_tokens=0,
- max_audio_megabytes_per_1_minute=0,
- max_images_per_1_minute=0,
- max_requests_per_1_day=0,
- max_requests_per_1_minute=0,
- max_tokens_per_1_minute=0,
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.rate_limits.with_streaming_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"):
- client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.list(
- project_id="project_id",
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- rate_limit = client.organization.projects.rate_limits.list(
- project_id="project_id",
- after="after",
- before="before",
- limit=0,
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.rate_limits.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.rate_limits.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.rate_limits.with_raw_response.list(
- project_id="",
- )
-
-
-class TestAsyncRateLimits:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- batch_1_day_max_input_tokens=0,
- max_audio_megabytes_per_1_minute=0,
- max_images_per_1_minute=0,
- max_requests_per_1_day=0,
- max_requests_per_1_minute=0,
- max_tokens_per_1_minute=0,
- )
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = await response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.rate_limits.with_streaming_response.update(
- rate_limit_id="rate_limit_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = await response.parse()
- assert_matches_type(RateLimit, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="rate_limit_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"):
- await async_client.organization.projects.rate_limits.with_raw_response.update(
- rate_limit_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.list(
- project_id="project_id",
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- rate_limit = await async_client.organization.projects.rate_limits.list(
- project_id="project_id",
- after="after",
- before="before",
- limit=0,
- )
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.rate_limits.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- rate_limit = await response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.rate_limits.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- rate_limit = await response.parse()
- assert_matches_type(RateLimitListResponse, rate_limit, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.rate_limits.with_raw_response.list(
- project_id="",
- )
diff --git a/tests/api_resources/organization/projects/test_service_accounts.py b/tests/api_resources/organization/projects/test_service_accounts.py
deleted file mode 100644
index 4cbdbd38..00000000
--- a/tests/api_resources/organization/projects/test_service_accounts.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- ServiceAccount,
- ServiceAccountListResponse,
- ServiceAccountCreateResponse,
- ServiceAccountDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestServiceAccounts:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.create(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.create(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.create(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.create(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.list(
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- service_account = client.organization.projects.service_accounts.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.service_accounts.with_streaming_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="",
- project_id="project_id",
- )
-
-
-class TestAsyncServiceAccounts:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.create(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.create(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.create(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.create(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.retrieve(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccount, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.retrieve(
- service_account_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.list(
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccountListResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- service_account = await async_client.organization.projects.service_accounts.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- service_account = await response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.service_accounts.with_streaming_response.delete(
- service_account_id="service_account_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- service_account = await response.parse()
- assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="service_account_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
- await async_client.organization.projects.service_accounts.with_raw_response.delete(
- service_account_id="",
- project_id="project_id",
- )
diff --git a/tests/api_resources/organization/projects/test_users.py b/tests/api_resources/organization/projects/test_users.py
deleted file mode 100644
index df2a136e..00000000
--- a/tests/api_resources/organization/projects/test_users.py
+++ /dev/null
@@ -1,552 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
- ProjectUser,
- UserListResponse,
- UserDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUsers:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.projects.users.with_raw_response.retrieve(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="",
- role="owner",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.projects.users.with_raw_response.update(
- user_id="",
- project_id="project_id",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.list(
- project_id="project_id",
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.delete(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.delete(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.projects.users.with_raw_response.delete(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_add(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.projects.users.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.users.with_raw_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.users.with_streaming_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.users.with_raw_response.add(
- project_id="",
- role="owner",
- user_id="user_id",
- )
-
-
-class TestAsyncUsers:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.retrieve(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.retrieve(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.retrieve(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.update(
- user_id="user_id",
- project_id="project_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.update(
- user_id="user_id",
- project_id="",
- role="owner",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.update(
- user_id="",
- project_id="project_id",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.list(
- project_id="project_id",
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.list(
- project_id="project_id",
- after="after",
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.list(
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.list(
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.list(
- project_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.delete(
- user_id="user_id",
- project_id="project_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.delete(
- user_id="user_id",
- project_id="project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.delete(
- user_id="user_id",
- project_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.delete(
- user_id="",
- project_id="project_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.projects.users.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.users.with_raw_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.users.with_streaming_response.add(
- project_id="project_id",
- role="owner",
- user_id="user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(ProjectUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.users.with_raw_response.add(
- project_id="",
- role="owner",
- user_id="user_id",
- )
diff --git a/tests/api_resources/organization/test_admin_api_keys.py b/tests/api_resources/organization/test_admin_api_keys.py
deleted file mode 100644
index 0e0949a1..00000000
--- a/tests/api_resources/organization/test_admin_api_keys.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- AdminAPIKey,
- AdminAPIKeyListResponse,
- AdminAPIKeyDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAdminAPIKeys:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.create(
- name="New Admin Key",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.create(
- name="New Admin Key",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.create(
- name="New Admin Key",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.retrieve(
- "key_id",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.retrieve(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.retrieve(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.admin_api_keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.list()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.list(
- after="after",
- limit=0,
- order="asc",
- )
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- admin_api_key = client.organization.admin_api_keys.delete(
- "key_id",
- )
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.admin_api_keys.with_raw_response.delete(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.admin_api_keys.with_streaming_response.delete(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- client.organization.admin_api_keys.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncAdminAPIKeys:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.create(
- name="New Admin Key",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.create(
- name="New Admin Key",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.create(
- name="New Admin Key",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.retrieve(
- "key_id",
- )
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.retrieve(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.retrieve(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.admin_api_keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.list()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.list(
- after="after",
- limit=0,
- order="asc",
- )
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- admin_api_key = await async_client.organization.admin_api_keys.delete(
- "key_id",
- )
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.admin_api_keys.with_raw_response.delete(
- "key_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.admin_api_keys.with_streaming_response.delete(
- "key_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- admin_api_key = await response.parse()
- assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
- await async_client.organization.admin_api_keys.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/organization/test_invites.py b/tests/api_resources/organization/test_invites.py
deleted file mode 100644
index 73528d26..00000000
--- a/tests/api_resources/organization/test_invites.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- Invite,
- InviteListResponse,
- InviteDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestInvites:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.create(
- email="email",
- role="reader",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.create(
- email="email",
- role="reader",
- projects=[
- {
- "id": "id",
- "role": "member",
- }
- ],
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.create(
- email="email",
- role="reader",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.create(
- email="email",
- role="reader",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.retrieve(
- "invite_id",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.retrieve(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.retrieve(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- client.organization.invites.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.list()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.list(
- after="after",
- limit=0,
- )
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- invite = client.organization.invites.delete(
- "invite_id",
- )
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.invites.with_raw_response.delete(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.invites.with_streaming_response.delete(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- client.organization.invites.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncInvites:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.create(
- email="email",
- role="reader",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.create(
- email="email",
- role="reader",
- projects=[
- {
- "id": "id",
- "role": "member",
- }
- ],
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.create(
- email="email",
- role="reader",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.create(
- email="email",
- role="reader",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.retrieve(
- "invite_id",
- )
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.retrieve(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.retrieve(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(Invite, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- await async_client.organization.invites.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.list()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.list(
- after="after",
- limit=0,
- )
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(InviteListResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- invite = await async_client.organization.invites.delete(
- "invite_id",
- )
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.invites.with_raw_response.delete(
- "invite_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- invite = await response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.invites.with_streaming_response.delete(
- "invite_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- invite = await response.parse()
- assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
- await async_client.organization.invites.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/organization/test_projects.py b/tests/api_resources/organization/test_projects.py
deleted file mode 100644
index 6b9dd9a4..00000000
--- a/tests/api_resources/organization/test_projects.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- Project,
- ProjectListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestProjects:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.create(
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.create(
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.create(
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.retrieve(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.retrieve(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.retrieve(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.update(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.update(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.update(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.with_raw_response.update(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.list()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.list(
- after="after",
- include_archived=True,
- limit=0,
- )
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_archive(self, client: DigitaloceanGenaiSDK) -> None:
- project = client.organization.projects.archive(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_archive(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.projects.with_raw_response.archive(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_archive(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.projects.with_streaming_response.archive(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_archive(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.organization.projects.with_raw_response.archive(
- "",
- )
-
-
-class TestAsyncProjects:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.create(
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.create(
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.create(
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.retrieve(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.retrieve(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.retrieve(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.update(
- project_id="project_id",
- name="name",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.update(
- project_id="project_id",
- name="name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.update(
- project_id="project_id",
- name="name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.with_raw_response.update(
- project_id="",
- name="name",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.list()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.list(
- after="after",
- include_archived=True,
- limit=0,
- )
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(ProjectListResponse, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- project = await async_client.organization.projects.archive(
- "project_id",
- )
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.projects.with_raw_response.archive(
- "project_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.projects.with_streaming_response.archive(
- "project_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- project = await response.parse()
- assert_matches_type(Project, project, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.organization.projects.with_raw_response.archive(
- "",
- )
diff --git a/tests/api_resources/organization/test_usage.py b/tests/api_resources/organization/test_usage.py
deleted file mode 100644
index 198f2159..00000000
--- a/tests/api_resources/organization/test_usage.py
+++ /dev/null
@@ -1,834 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import UsageResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUsage:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_speeches(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_speeches_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_speeches(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.audio_speeches(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.audio_speeches(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_transcriptions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_audio_transcriptions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.audio_transcriptions(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.audio_transcriptions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.audio_transcriptions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.code_interpreter_sessions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_code_interpreter_sessions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.code_interpreter_sessions(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.code_interpreter_sessions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.code_interpreter_sessions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_completions(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.completions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_completions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.completions(
- start_time=0,
- api_key_ids=["string"],
- batch=True,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_completions(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.completions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_completions(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.completions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_embeddings(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.embeddings(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_embeddings_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.embeddings(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.embeddings(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.embeddings(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_images(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.images(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_images_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.images(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- sizes=["256x256"],
- sources=["image.generation"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_images(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.images(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_images(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.images(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_moderations(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.moderations(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_moderations_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.moderations(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_moderations(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.moderations(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_moderations(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.moderations(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_vector_stores(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.vector_stores(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_vector_stores_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- usage = client.organization.usage.vector_stores(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.usage.with_raw_response.vector_stores(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.usage.with_streaming_response.vector_stores(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncUsage:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_speeches(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_speeches_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_speeches(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.audio_speeches(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.audio_speeches(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_transcriptions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_audio_transcriptions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.audio_transcriptions(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.audio_transcriptions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.audio_transcriptions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.code_interpreter_sessions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_code_interpreter_sessions_with_all_params(
- self, async_client: AsyncDigitaloceanGenaiSDK
- ) -> None:
- usage = await async_client.organization.usage.code_interpreter_sessions(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.code_interpreter_sessions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.code_interpreter_sessions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.completions(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_completions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.completions(
- start_time=0,
- api_key_ids=["string"],
- batch=True,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.completions(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.completions(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.embeddings(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_embeddings_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.embeddings(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.embeddings(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.embeddings(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.images(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_images_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.images(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- sizes=["256x256"],
- sources=["image.generation"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.images(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.images(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.moderations(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_moderations_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.moderations(
- start_time=0,
- api_key_ids=["string"],
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- models=["string"],
- page="page",
- project_ids=["string"],
- user_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.moderations(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.moderations(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.vector_stores(
- start_time=0,
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_vector_stores_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- usage = await async_client.organization.usage.vector_stores(
- start_time=0,
- bucket_width="1m",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.usage.with_raw_response.vector_stores(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.usage.with_streaming_response.vector_stores(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- usage = await response.parse()
- assert_matches_type(UsageResponse, usage, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/organization/test_users.py b/tests/api_resources/organization/test_users.py
deleted file mode 100644
index b40fcbef..00000000
--- a/tests/api_resources/organization/test_users.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
- OrganizationUser,
- UserListResponse,
- UserDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUsers:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.retrieve(
- "user_id",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.retrieve(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.retrieve(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.users.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.update(
- user_id="user_id",
- role="owner",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.update(
- user_id="user_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.update(
- user_id="user_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.users.with_raw_response.update(
- user_id="",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.list()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.list(
- after="after",
- emails=["string"],
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- user = client.organization.users.delete(
- "user_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.users.with_raw_response.delete(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.users.with_streaming_response.delete(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- client.organization.users.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncUsers:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.retrieve(
- "user_id",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.retrieve(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.retrieve(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.users.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.update(
- user_id="user_id",
- role="owner",
- )
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.update(
- user_id="user_id",
- role="owner",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.update(
- user_id="user_id",
- role="owner",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(OrganizationUser, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.users.with_raw_response.update(
- user_id="",
- role="owner",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.list()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.list(
- after="after",
- emails=["string"],
- limit=0,
- )
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserListResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- user = await async_client.organization.users.delete(
- "user_id",
- )
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.users.with_raw_response.delete(
- "user_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.users.with_streaming_response.delete(
- "user_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- user = await response.parse()
- assert_matches_type(UserDeleteResponse, user, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
- await async_client.organization.users.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
new file mode 100644
index 00000000..0c1833f6
--- /dev/null
+++ b/tests/api_resources/test_agents.py
@@ -0,0 +1,909 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import httpx
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import (
+ AgentListResponse,
+ AgentCreateResponse,
+ AgentDeleteResponse,
+ AgentUpdateResponse,
+ AgentRetrieveResponse,
+ AgentUpdateStatusResponse,
+ AgentRetrieveUsageResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAgents:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ agent = client.agents.create()
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ agent = client.agents.create(
+ anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ description='"My Agent Description"',
+ instruction='"You are an agent who thinks deeply about the world"',
+ knowledge_base_uuid=["example string"],
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My Agent"',
+ openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ region='"tor1"',
+ tags=["example string"],
+ workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ agent = client.agents.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ agent = client.agents.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ agent = client.agents.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_log_insights_enabled=True,
+ allowed_domains=["example string"],
+ anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ conversation_logs_enabled=True,
+ description='"My Agent Description"',
+ instruction='"You are an agent who thinks deeply about the world"',
+ k=5,
+ max_tokens=100,
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My New Agent Name"',
+ openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ provide_citations=True,
+ retrieval_method="RETRIEVAL_METHOD_UNKNOWN",
+ tags=["example string"],
+ temperature=0.7,
+ top_p=0.9,
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.agents.with_raw_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.agents.with_streaming_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ client.agents.with_raw_response.update(
+ path_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ agent = client.agents.list()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ agent = client.agents.list(
+ only_deployed=True,
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.agents.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.agents.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ agent = client.agents.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_usage(self, client: Gradient) -> None:
+ agent = client.agents.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_usage_with_all_params(self, client: Gradient) -> None:
+ agent = client.agents.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ start="start",
+ stop="stop",
+ )
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_usage(self, client: Gradient) -> None:
+ response = client.agents.with_raw_response.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_usage(self, client: Gradient) -> None:
+ with client.agents.with_streaming_response.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve_usage(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.with_raw_response.retrieve_usage(
+ uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_status(self, client: Gradient) -> None:
+ agent = client.agents.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_status_with_all_params(self, client: Gradient) -> None:
+ agent = client.agents.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ visibility="VISIBILITY_UNKNOWN",
+ )
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update_status(self, client: Gradient) -> None:
+ response = client.agents.with_raw_response.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update_status(self, client: Gradient) -> None:
+ with client.agents.with_streaming_response.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update_status(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ client.agents.with_raw_response.update_status(
+ path_uuid="",
+ )
+
+ @parametrize
+ def test_method_wait_until_ready(self, client: Gradient, respx_mock: Any) -> None:
+ """Test successful wait_until_ready when agent becomes ready."""
+ agent_uuid = "test-agent-id"
+
+ # Create side effect that returns different responses
+ call_count = [0]
+
+ def get_response(_: httpx.Request) -> httpx.Response:
+ call_count[0] += 1
+ if call_count[0] == 1:
+ # First call: deploying
+ return httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_DEPLOYING"},
+ }
+ },
+ )
+ else:
+ # Subsequent calls: running
+ return httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_RUNNING"},
+ }
+ },
+ )
+
+ respx_mock.get(f"/v2/gen-ai/agents/{agent_uuid}").mock(side_effect=get_response)
+
+ agent = client.agents.wait_until_ready(agent_uuid, poll_interval=0.1, timeout=10.0)
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+ assert agent.agent is not None
+ assert agent.agent.deployment is not None
+ assert agent.agent.deployment.status == "STATUS_RUNNING"
+
+ @parametrize
+ def test_wait_until_ready_timeout(self, client: Gradient, respx_mock: Any) -> None:
+ """Test that wait_until_ready raises timeout error."""
+ from gradient._exceptions import AgentDeploymentTimeoutError
+
+ agent_uuid = "test-agent-id"
+
+ # Mock always returns deploying
+ respx_mock.get(f"/v2/gen-ai/agents/{agent_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_DEPLOYING"},
+ }
+ },
+ )
+ )
+
+ with pytest.raises(AgentDeploymentTimeoutError) as exc_info:
+ client.agents.wait_until_ready(agent_uuid, poll_interval=0.1, timeout=0.5)
+
+ assert "did not reach STATUS_RUNNING within" in str(exc_info.value)
+ assert exc_info.value.agent_id == agent_uuid
+
+ @parametrize
+ def test_wait_until_ready_deployment_failed(self, client: Gradient, respx_mock: Any) -> None:
+ """Test that wait_until_ready raises error on deployment failure."""
+ from gradient._exceptions import AgentDeploymentError
+
+ agent_uuid = "test-agent-id"
+
+ # Mock returns failed status
+ respx_mock.get(f"/v2/gen-ai/agents/{agent_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_FAILED"},
+ }
+ },
+ )
+ )
+
+ with pytest.raises(AgentDeploymentError) as exc_info:
+ client.agents.wait_until_ready(agent_uuid, poll_interval=0.1, timeout=10.0)
+
+ assert "deployment failed with status: STATUS_FAILED" in str(exc_info.value)
+ assert exc_info.value.status == "STATUS_FAILED"
+
+ @parametrize
+ def test_wait_until_ready_empty_uuid(self, client: Gradient) -> None:
+ """Test that wait_until_ready validates empty uuid."""
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid`"):
+ client.agents.wait_until_ready("")
+
+
+class TestAsyncAgents:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.create()
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.create(
+ anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ description='"My Agent Description"',
+ instruction='"You are an agent who thinks deeply about the world"',
+ knowledge_base_uuid=["example string"],
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My Agent"',
+ openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ region='"tor1"',
+ tags=["example string"],
+ workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
+ )
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentCreateResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ agent_log_insights_enabled=True,
+ allowed_domains=["example string"],
+ anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ conversation_logs_enabled=True,
+ description='"My Agent Description"',
+ instruction='"You are an agent who thinks deeply about the world"',
+ k=5,
+ max_tokens=100,
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My New Agent Name"',
+ openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ provide_citations=True,
+ retrieval_method="RETRIEVAL_METHOD_UNKNOWN",
+ tags=["example string"],
+ temperature=0.7,
+ top_p=0.9,
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.with_raw_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.with_streaming_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentUpdateResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ await async_client.agents.with_raw_response.update(
+ path_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.list()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.list(
+ only_deployed=True,
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentDeleteResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_usage(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_usage_with_all_params(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ start="start",
+ stop="stop",
+ )
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_usage(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.with_raw_response.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_usage(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.with_streaming_response.retrieve_usage(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve_usage(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.with_raw_response.retrieve_usage(
+ uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_status(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_status_with_all_params(self, async_client: AsyncGradient) -> None:
+ agent = await async_client.agents.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ visibility="VISIBILITY_UNKNOWN",
+ )
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update_status(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.with_raw_response.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update_status(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.with_streaming_response.update_status(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update_status(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ await async_client.agents.with_raw_response.update_status(
+ path_uuid="",
+ )
+
+ @parametrize
+ async def test_method_wait_until_ready(self, async_client: AsyncGradient, respx_mock: Any) -> None:
+ """Test successful async wait_until_ready when agent becomes ready."""
+ agent_uuid = "test-agent-id"
+
+ # Create side effect that returns different responses
+ call_count = [0]
+
+ def get_response(_: httpx.Request) -> httpx.Response:
+ call_count[0] += 1
+ if call_count[0] == 1:
+ # First call: deploying
+ return httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_DEPLOYING"},
+ }
+ },
+ )
+ else:
+ # Subsequent calls: running
+ return httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_RUNNING"},
+ }
+ },
+ )
+
+ respx_mock.get(f"/v2/gen-ai/agents/{agent_uuid}").mock(side_effect=get_response)
+
+ agent = await async_client.agents.wait_until_ready(agent_uuid, poll_interval=0.1, timeout=10.0)
+ assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
+ assert agent.agent is not None
+ assert agent.agent.deployment is not None
+ assert agent.agent.deployment.status == "STATUS_RUNNING"
+
+ @parametrize
+ async def test_wait_until_ready_timeout(self, async_client: AsyncGradient, respx_mock: Any) -> None:
+ """Test that async wait_until_ready raises timeout error."""
+ from gradient._exceptions import AgentDeploymentTimeoutError
+
+ agent_uuid = "test-agent-id"
+
+ # Mock always returns deploying
+ respx_mock.get(f"/v2/gen-ai/agents/{agent_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_DEPLOYING"},
+ }
+ },
+ )
+ )
+
+ with pytest.raises(AgentDeploymentTimeoutError) as exc_info:
+ await async_client.agents.wait_until_ready(agent_uuid, poll_interval=0.1, timeout=0.5)
+
+ assert "did not reach STATUS_RUNNING within" in str(exc_info.value)
+ assert exc_info.value.agent_id == agent_uuid
+
+ @parametrize
+ async def test_wait_until_ready_deployment_failed(self, async_client: AsyncGradient, respx_mock: Any) -> None:
+ """Test that async wait_until_ready raises error on deployment failure."""
+ from gradient._exceptions import AgentDeploymentError
+
+ agent_uuid = "test-agent-id"
+
+ # Mock returns failed status
+ respx_mock.get(f"/v2/gen-ai/agents/{agent_uuid}").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "agent": {
+ "uuid": agent_uuid,
+ "deployment": {"status": "STATUS_FAILED"},
+ }
+ },
+ )
+ )
+
+ with pytest.raises(AgentDeploymentError) as exc_info:
+ await async_client.agents.wait_until_ready(agent_uuid, poll_interval=0.1, timeout=10.0)
+
+ assert "deployment failed with status: STATUS_FAILED" in str(exc_info.value)
+ assert exc_info.value.status == "STATUS_FAILED"
diff --git a/tests/api_resources/test_assistants.py b/tests/api_resources/test_assistants.py
deleted file mode 100644
index a5fa998d..00000000
--- a/tests/api_resources/test_assistants.py
+++ /dev/null
@@ -1,528 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- AssistantObject,
- AssistantListResponse,
- AssistantDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAssistants:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.create(
- model="gpt-4o",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.create(
- model="gpt-4o",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.create(
- model="gpt-4o",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.create(
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.retrieve(
- "assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.retrieve(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.retrieve(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.assistants.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.update(
- assistant_id="assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.update(
- assistant_id="assistant_id",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- model="string",
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.update(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.update(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.assistants.with_raw_response.update(
- assistant_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.list()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- assistant = client.assistants.delete(
- "assistant_id",
- )
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.assistants.with_raw_response.delete(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.assistants.with_streaming_response.delete(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- client.assistants.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncAssistants:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.create(
- model="gpt-4o",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.create(
- model="gpt-4o",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.create(
- model="gpt-4o",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.create(
- model="gpt-4o",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.retrieve(
- "assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.retrieve(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.retrieve(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.assistants.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.update(
- assistant_id="assistant_id",
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.update(
- assistant_id="assistant_id",
- description="description",
- instructions="instructions",
- metadata={"foo": "string"},
- model="string",
- name="name",
- reasoning_effort="low",
- response_format="auto",
- temperature=1,
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- )
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.update(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.update(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantObject, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.assistants.with_raw_response.update(
- assistant_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.list()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantListResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- assistant = await async_client.assistants.delete(
- "assistant_id",
- )
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.assistants.with_raw_response.delete(
- "assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assistant = await response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.assistants.with_streaming_response.delete(
- "assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assistant = await response.parse()
- assert_matches_type(AssistantDeleteResponse, assistant, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
- await async_client.assistants.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_audio.py b/tests/api_resources/test_audio.py
deleted file mode 100644
index e71d568e..00000000
--- a/tests/api_resources/test_audio.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import httpx
-import pytest
-from respx import MockRouter
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- AudioTranslateAudioResponse,
- AudioTranscribeAudioResponse,
-)
-from digitalocean_genai_sdk._response import (
- BinaryAPIResponse,
- AsyncBinaryAPIResponse,
- StreamedBinaryAPIResponse,
- AsyncStreamedBinaryAPIResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAudio:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_method_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
- assert audio.is_closed
- assert audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, BinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_method_generate_speech_with_all_params(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- instructions="instructions",
- response_format="mp3",
- speed=0.25,
- )
- assert audio.is_closed
- assert audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, BinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_raw_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
-
- audio = client.audio.with_raw_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
-
- assert audio.is_closed is True
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
- assert audio.json() == {"foo": "bar"}
- assert isinstance(audio, BinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- def test_streaming_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- with client.audio.with_streaming_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- ) as audio:
- assert not audio.is_closed
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assert audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, StreamedBinaryAPIResponse)
-
- assert cast(Any, audio.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_transcribe_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- include=["logprobs"],
- language="language",
- prompt="prompt",
- response_format="json",
- stream=True,
- temperature=0,
- timestamp_granularities=["word"],
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.audio.with_raw_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None:
- with client.audio.with_streaming_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_translate_audio(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_translate_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- audio = client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- prompt="prompt",
- response_format="json",
- temperature=0,
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.audio.with_raw_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None:
- with client.audio.with_streaming_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncAudio:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_method_generate_speech(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = await async_client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
- assert audio.is_closed
- assert await audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, AsyncBinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_method_generate_speech_with_all_params(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- audio = await async_client.audio.generate_speech(
- input="input",
- model="string",
- voice="ash",
- instructions="instructions",
- response_format="mp3",
- speed=0.25,
- )
- assert audio.is_closed
- assert await audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, AsyncBinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_raw_response_generate_speech(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
-
- audio = await async_client.audio.with_raw_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- )
-
- assert audio.is_closed is True
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
- assert await audio.json() == {"foo": "bar"}
- assert isinstance(audio, AsyncBinaryAPIResponse)
-
- @pytest.mark.skip()
- @parametrize
- @pytest.mark.respx(base_url=base_url)
- async def test_streaming_response_generate_speech(
- self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter
- ) -> None:
- respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- async with async_client.audio.with_streaming_response.generate_speech(
- input="input",
- model="string",
- voice="ash",
- ) as audio:
- assert not audio.is_closed
- assert audio.http_request.headers.get("X-Stainless-Lang") == "python"
-
- assert await audio.json() == {"foo": "bar"}
- assert cast(Any, audio.is_closed) is True
- assert isinstance(audio, AsyncStreamedBinaryAPIResponse)
-
- assert cast(Any, audio.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_transcribe_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- include=["logprobs"],
- language="language",
- prompt="prompt",
- response_format="json",
- stream=True,
- temperature=0,
- timestamp_granularities=["word"],
- )
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.audio.with_raw_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = await response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.audio.with_streaming_response.transcribe_audio(
- file=b"raw file contents",
- model="gpt-4o-transcribe",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = await response.parse()
- assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_translate_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- audio = await async_client.audio.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- prompt="prompt",
- response_format="json",
- temperature=0,
- )
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.audio.with_raw_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- audio = await response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.audio.with_streaming_response.translate_audio(
- file=b"raw file contents",
- model="whisper-1",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- audio = await response.parse()
- assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py
deleted file mode 100644
index 6ad0bbee..00000000
--- a/tests/api_resources/test_batches.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import Batch, BatchListResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestBatches:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.retrieve(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.retrieve(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.retrieve(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.batches.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.list()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.list(
- after="after",
- limit=0,
- )
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- batch = client.batches.cancel(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.batches.with_raw_response.cancel(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.batches.with_streaming_response.cancel(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.batches.with_raw_response.cancel(
- "",
- )
-
-
-class TestAsyncBatches:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.create(
- completion_window="24h",
- endpoint="/v1/responses",
- input_file_id="input_file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.retrieve(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.retrieve(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.retrieve(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.batches.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.list()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.list(
- after="after",
- limit=0,
- )
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(BatchListResponse, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- batch = await async_client.batches.cancel(
- "batch_id",
- )
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.batches.with_raw_response.cancel(
- "batch_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.batches.with_streaming_response.cancel(
- "batch_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- batch = await response.parse()
- assert_matches_type(Batch, batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.batches.with_raw_response.cancel(
- "",
- )
diff --git a/tests/api_resources/test_billing.py b/tests/api_resources/test_billing.py
new file mode 100644
index 00000000..9bcd29e0
--- /dev/null
+++ b/tests/api_resources/test_billing.py
@@ -0,0 +1,177 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import BillingListInsightsResponse
+from gradient._utils import parse_date
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestBilling:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_insights(self, client: Gradient) -> None:
+ billing = client.billing.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ )
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_insights_with_all_params(self, client: Gradient) -> None:
+ billing = client.billing.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_insights(self, client: Gradient) -> None:
+ response = client.billing.with_raw_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ billing = response.parse()
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_insights(self, client: Gradient) -> None:
+ with client.billing.with_streaming_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ billing = response.parse()
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_insights(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `account_urn` but received ''"):
+ client.billing.with_raw_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="",
+ start_date=parse_date("2025-01-01"),
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `start_date` but received ''"):
+ client.billing.with_raw_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `end_date` but received ''"):
+ client.billing.with_raw_response.list_insights(
+ end_date="",
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ )
+
+
+class TestAsyncBilling:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_insights(self, async_client: AsyncGradient) -> None:
+ billing = await async_client.billing.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ )
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_insights_with_all_params(self, async_client: AsyncGradient) -> None:
+ billing = await async_client.billing.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_insights(self, async_client: AsyncGradient) -> None:
+ response = await async_client.billing.with_raw_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ billing = await response.parse()
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_insights(self, async_client: AsyncGradient) -> None:
+ async with async_client.billing.with_streaming_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ billing = await response.parse()
+ assert_matches_type(BillingListInsightsResponse, billing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_insights(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `account_urn` but received ''"):
+ await async_client.billing.with_raw_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="",
+ start_date=parse_date("2025-01-01"),
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `start_date` but received ''"):
+ await async_client.billing.with_raw_response.list_insights(
+ end_date=parse_date("2025-01-31"),
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `end_date` but received ''"):
+ await async_client.billing.with_raw_response.list_insights(
+ end_date="",
+ account_urn="do:team:12345678-1234-1234-1234-123456789012",
+ start_date=parse_date("2025-01-01"),
+ )
diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py
deleted file mode 100644
index eb5c1abd..00000000
--- a/tests/api_resources/test_completions.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- CompletionCreateResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestCompletions:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.completions.create(
- model="string",
- prompt="This is a test.",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- completion = client.completions.create(
- model="string",
- prompt="This is a test.",
- best_of=0,
- echo=True,
- frequency_penalty=-2,
- logit_bias={"foo": 0},
- logprobs=0,
- max_tokens=16,
- n=1,
- presence_penalty=-2,
- seed=0,
- stop="\n",
- stream=True,
- stream_options={"include_usage": True},
- suffix="test.",
- temperature=1,
- top_p=1,
- user="user-1234",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.completions.with_raw_response.create(
- model="string",
- prompt="This is a test.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.completions.with_streaming_response.create(
- model="string",
- prompt="This is a test.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncCompletions:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.completions.create(
- model="string",
- prompt="This is a test.",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- completion = await async_client.completions.create(
- model="string",
- prompt="This is a test.",
- best_of=0,
- echo=True,
- frequency_penalty=-2,
- logit_bias={"foo": 0},
- logprobs=0,
- max_tokens=16,
- n=1,
- presence_penalty=-2,
- seed=0,
- stop="\n",
- stream=True,
- stream_options={"include_usage": True},
- suffix="test.",
- temperature=1,
- top_p=1,
- user="user-1234",
- )
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.completions.with_raw_response.create(
- model="string",
- prompt="This is a test.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- completion = await response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.completions.with_streaming_response.create(
- model="string",
- prompt="This is a test.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- completion = await response.parse()
- assert_matches_type(CompletionCreateResponse, completion, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py
deleted file mode 100644
index bd3ef322..00000000
--- a/tests/api_resources/test_embeddings.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import EmbeddingCreateResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestEmbeddings:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- embedding = client.embeddings.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- )
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- embedding = client.embeddings.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- dimensions=1,
- encoding_format="float",
- user="user-1234",
- )
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.embeddings.with_raw_response.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- embedding = response.parse()
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.embeddings.with_streaming_response.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- embedding = response.parse()
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncEmbeddings:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- embedding = await async_client.embeddings.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- )
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- embedding = await async_client.embeddings.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- dimensions=1,
- encoding_format="float",
- user="user-1234",
- )
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.embeddings.with_raw_response.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- embedding = await response.parse()
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.embeddings.with_streaming_response.create(
- input="The quick brown fox jumped over the lazy dog",
- model="text-embedding-3-small",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- embedding = await response.parse()
- assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py
deleted file mode 100644
index b30ae859..00000000
--- a/tests/api_resources/test_files.py
+++ /dev/null
@@ -1,430 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- OpenAIFile,
- FileListResponse,
- FileDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFiles:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.retrieve(
- "file_id",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.retrieve(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.retrieve(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.files.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.list()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.list(
- after="after",
- limit=0,
- order="asc",
- purpose="purpose",
- )
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.delete(
- "file_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.delete(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.delete(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.files.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.retrieve_content(
- "file_id",
- )
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.retrieve_content(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.retrieve_content(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(str, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.files.with_raw_response.retrieve_content(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_upload(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.files.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_upload(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.files.with_raw_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_upload(self, client: DigitaloceanGenaiSDK) -> None:
- with client.files.with_streaming_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncFiles:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.retrieve(
- "file_id",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.retrieve(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.retrieve(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.files.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.list()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.list(
- after="after",
- limit=0,
- order="asc",
- purpose="purpose",
- )
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileListResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.delete(
- "file_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.delete(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.delete(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.files.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.retrieve_content(
- "file_id",
- )
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.retrieve_content(
- "file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(str, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.retrieve_content(
- "file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(str, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.files.with_raw_response.retrieve_content(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.files.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.files.with_raw_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.files.with_streaming_response.upload(
- file=b"raw file contents",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(OpenAIFile, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py
new file mode 100644
index 00000000..32a26a9a
--- /dev/null
+++ b/tests/api_resources/test_gpu_droplets.py
@@ -0,0 +1,912 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import (
+ GPUDropletListResponse,
+ GPUDropletCreateResponse,
+ GPUDropletRetrieveResponse,
+ GPUDropletListKernelsResponse,
+ GPUDropletListFirewallsResponse,
+ GPUDropletListNeighborsResponse,
+ GPUDropletListSnapshotsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestGPUDroplets:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_1(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ backup_policy={
+ "hour": 0,
+ "plan": "daily",
+ "weekday": "SUN",
+ },
+ backups=True,
+ ipv6=True,
+ monitoring=True,
+ private_networking=True,
+ region="nyc3",
+ ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ tags=["env:prod", "web"],
+ user_data="#cloud-config\nruncmd:\n - touch /test.txt\n",
+ volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"],
+ vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ with_droplet_agent=True,
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ backup_policy={
+ "hour": 0,
+ "plan": "daily",
+ "weekday": "SUN",
+ },
+ backups=True,
+ ipv6=True,
+ monitoring=True,
+ private_networking=True,
+ region="nyc3",
+ ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ tags=["env:prod", "web"],
+ user_data="#cloud-config\nruncmd:\n - touch /test.txt\n",
+ volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"],
+ vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ with_droplet_agent=True,
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.retrieve(
+ 3164444,
+ )
+ assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.retrieve(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.retrieve(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list()
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list(
+ name="name",
+ page=1,
+ per_page=1,
+ tag_name="tag_name",
+ type="droplets",
+ )
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.delete(
+ 3164444,
+ )
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.delete(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.delete(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert gpu_droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_by_tag(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.delete_by_tag(
+ tag_name="tag_name",
+ )
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete_by_tag(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.delete_by_tag(
+ tag_name="tag_name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete_by_tag(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.delete_by_tag(
+ tag_name="tag_name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert gpu_droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_firewalls(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list_firewalls(
+ droplet_id=3164444,
+ )
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_firewalls_with_all_params(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list_firewalls(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_firewalls(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.list_firewalls(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_firewalls(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.list_firewalls(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_kernels(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list_kernels(
+ droplet_id=3164444,
+ )
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_kernels_with_all_params(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list_kernels(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_kernels(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.list_kernels(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_kernels(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.list_kernels(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_neighbors(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list_neighbors(
+ 3164444,
+ )
+ assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_neighbors(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.list_neighbors(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_neighbors(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.list_neighbors(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_snapshots(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list_snapshots(
+ droplet_id=3164444,
+ )
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_snapshots_with_all_params(self, client: Gradient) -> None:
+ gpu_droplet = client.gpu_droplets.list_snapshots(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_snapshots(self, client: Gradient) -> None:
+ response = client.gpu_droplets.with_raw_response.list_snapshots(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_snapshots(self, client: Gradient) -> None:
+ with client.gpu_droplets.with_streaming_response.list_snapshots(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = response.parse()
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncGPUDroplets:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ backup_policy={
+ "hour": 0,
+ "plan": "daily",
+ "weekday": "SUN",
+ },
+ backups=True,
+ ipv6=True,
+ monitoring=True,
+ private_networking=True,
+ region="nyc3",
+ ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ tags=["env:prod", "web"],
+ user_data="#cloud-config\nruncmd:\n - touch /test.txt\n",
+ volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"],
+ vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ with_droplet_agent=True,
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.create(
+ image="ubuntu-20-04-x64",
+ name="example.com",
+ size="s-1vcpu-1gb",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ backup_policy={
+ "hour": 0,
+ "plan": "daily",
+ "weekday": "SUN",
+ },
+ backups=True,
+ ipv6=True,
+ monitoring=True,
+ private_networking=True,
+ region="nyc3",
+ ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+ tags=["env:prod", "web"],
+ user_data="#cloud-config\nruncmd:\n - touch /test.txt\n",
+ volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"],
+ vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000",
+ with_droplet_agent=True,
+ )
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.create(
+ image="ubuntu-20-04-x64",
+ names=["sub-01.example.com", "sub-02.example.com"],
+ size="s-1vcpu-1gb",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.retrieve(
+ 3164444,
+ )
+ assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.retrieve(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.retrieve(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list()
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list(
+ name="name",
+ page=1,
+ per_page=1,
+ tag_name="tag_name",
+ type="droplets",
+ )
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.delete(
+ 3164444,
+ )
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.delete(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.delete(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert gpu_droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_by_tag(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.delete_by_tag(
+ tag_name="tag_name",
+ )
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete_by_tag(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.delete_by_tag(
+ tag_name="tag_name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert gpu_droplet is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.delete_by_tag(
+ tag_name="tag_name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert gpu_droplet is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_firewalls(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list_firewalls(
+ droplet_id=3164444,
+ )
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list_firewalls(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_firewalls(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.list_firewalls(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_firewalls(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.list_firewalls(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_kernels(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list_kernels(
+ droplet_id=3164444,
+ )
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_kernels_with_all_params(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list_kernels(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_kernels(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.list_kernels(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_kernels(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.list_kernels(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_neighbors(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list_neighbors(
+ 3164444,
+ )
+ assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_neighbors(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.list_neighbors(
+ 3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_neighbors(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.list_neighbors(
+ 3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_snapshots(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list_snapshots(
+ droplet_id=3164444,
+ )
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGradient) -> None:
+ gpu_droplet = await async_client.gpu_droplets.list_snapshots(
+ droplet_id=3164444,
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_snapshots(self, async_client: AsyncGradient) -> None:
+ response = await async_client.gpu_droplets.with_raw_response.list_snapshots(
+ droplet_id=3164444,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_snapshots(self, async_client: AsyncGradient) -> None:
+ async with async_client.gpu_droplets.with_streaming_response.list_snapshots(
+ droplet_id=3164444,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ gpu_droplet = await response.parse()
+ assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py
index 380a0759..981570c2 100644
--- a/tests/api_resources/test_images.py
+++ b/tests/api_resources/test_images.py
@@ -7,11 +7,9 @@
import pytest
+from gradient import Gradient, AsyncGradient
from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- ImagesResponse,
-)
+from gradient.types import ImageGenerateResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -19,302 +17,224 @@
class TestImages:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create_edit(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
+ def test_method_generate_overload_1(self, client: Gradient) -> None:
+ image = client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create_edit_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- mask=b"raw file contents",
- model="dall-e-2",
+ def test_method_generate_with_all_params_overload_1(self, client: Gradient) -> None:
+ image = client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
n=1,
- response_format="url",
- size="1024x1024",
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
+ stream=False,
user="user-1234",
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.images.with_raw_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
+ def test_raw_response_generate_overload_1(self, client: Gradient) -> None:
+ response = client.images.with_raw_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_streaming_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None:
- with client.images.with_streaming_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
+ def test_streaming_response_generate_overload_1(self, client: Gradient) -> None:
+ with client.images.with_streaming_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create_generation(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_generation(
- prompt="A cute baby sea otter",
+ def test_method_generate_overload_2(self, client: Gradient) -> None:
+ image_stream = client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ image_stream.response.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create_generation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_generation(
- prompt="A cute baby sea otter",
- model="dall-e-3",
+ def test_method_generate_with_all_params_overload_2(self, client: Gradient) -> None:
+ image_stream = client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
n=1,
- quality="standard",
- response_format="url",
- size="1024x1024",
- style="vivid",
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
user="user-1234",
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ image_stream.response.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.images.with_raw_response.create_generation(
- prompt="A cute baby sea otter",
+ def test_raw_response_generate_overload_2(self, client: Gradient) -> None:
+ response = client.images.with_raw_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
)
- assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ stream = response.parse()
+ stream.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_streaming_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None:
- with client.images.with_streaming_response.create_generation(
- prompt="A cute baby sea otter",
+ def test_streaming_response_generate_overload_2(self, client: Gradient) -> None:
+ with client.images.with_streaming_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_variation(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_variation(
- image=b"raw file contents",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_variation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- image = client.images.create_variation(
- image=b"raw file contents",
- model="dall-e-2",
- n=1,
- response_format="url",
- size="1024x1024",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.images.with_raw_response.create_variation(
- image=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None:
- with client.images.with_streaming_response.create_variation(
- image=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ stream = response.parse()
+ stream.close()
assert cast(Any, response.is_closed) is True
class TestAsyncImages:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
+ async def test_method_generate_overload_1(self, async_client: AsyncGradient) -> None:
+ image = await async_client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create_edit_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
- mask=b"raw file contents",
- model="dall-e-2",
+ async def test_method_generate_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ image = await async_client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
n=1,
- response_format="url",
- size="1024x1024",
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
+ stream=False,
user="user-1234",
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.images.with_raw_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
+ async def test_raw_response_generate_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.images.with_raw_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_streaming_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.images.with_streaming_response.create_edit(
- image=b"raw file contents",
- prompt="A cute baby sea otter wearing a beret",
+ async def test_streaming_response_generate_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.images.with_streaming_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_generation(
- prompt="A cute baby sea otter",
+ async def test_method_generate_overload_2(self, async_client: AsyncGradient) -> None:
+ image_stream = await async_client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ await image_stream.response.aclose()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create_generation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_generation(
- prompt="A cute baby sea otter",
- model="dall-e-3",
+ async def test_method_generate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ image_stream = await async_client.images.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
n=1,
- quality="standard",
- response_format="url",
- size="1024x1024",
- style="vivid",
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
user="user-1234",
)
- assert_matches_type(ImagesResponse, image, path=["response"])
+ await image_stream.response.aclose()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.images.with_raw_response.create_generation(
- prompt="A cute baby sea otter",
+ async def test_raw_response_generate_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.images.with_raw_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
)
- assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ stream = await response.parse()
+ await stream.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_streaming_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.images.with_streaming_response.create_generation(
- prompt="A cute baby sea otter",
+ async def test_streaming_response_generate_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.images.with_streaming_response.generate(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_variation(
- image=b"raw file contents",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_variation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- image = await async_client.images.create_variation(
- image=b"raw file contents",
- model="dall-e-2",
- n=1,
- response_format="url",
- size="1024x1024",
- user="user-1234",
- )
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.images.with_raw_response.create_variation(
- image=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.images.with_streaming_response.create_variation(
- image=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- image = await response.parse()
- assert_matches_type(ImagesResponse, image, path=["response"])
+ stream = await response.parse()
+ await stream.close()
assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
new file mode 100644
index 00000000..a7c0d5e4
--- /dev/null
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -0,0 +1,835 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import (
+ KnowledgeBaseListResponse,
+ KnowledgeBaseCreateResponse,
+ KnowledgeBaseDeleteResponse,
+ KnowledgeBaseUpdateResponse,
+ KnowledgeBaseRetrieveResponse,
+ KnowledgeBaseListIndexingJobsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKnowledgeBases:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.create()
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.create(
+ database_id='"12345678-1234-1234-1234-123456789012"',
+ datasources=[
+ {
+ "aws_data_source": {
+ "bucket_name": "example name",
+ "item_path": "example string",
+ "key_id": "123e4567-e89b-12d3-a456-426614174000",
+ "region": "example string",
+ "secret_key": "example string",
+ },
+ "bucket_name": "example name",
+ "bucket_region": "example string",
+ "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED",
+ "chunking_options": {
+ "child_chunk_size": 350,
+ "max_chunk_size": 750,
+ "parent_chunk_size": 1000,
+ "semantic_threshold": 0.5,
+ },
+ "dropbox_data_source": {
+ "folder": "example string",
+ "refresh_token": "example string",
+ },
+ "file_upload_data_source": {
+ "original_file_name": "example name",
+ "size_in_bytes": "12345",
+ "stored_object_key": "example string",
+ },
+ "google_drive_data_source": {
+ "folder_id": "123e4567-e89b-12d3-a456-426614174000",
+ "refresh_token": "example string",
+ },
+ "item_path": "example string",
+ "spaces_data_source": {
+ "bucket_name": "example name",
+ "item_path": "example string",
+ "region": "example string",
+ },
+ "web_crawler_data_source": {
+ "base_url": "example string",
+ "crawling_option": "UNKNOWN",
+ "embed_media": True,
+ "exclude_tags": ["example string"],
+ },
+ }
+ ],
+ embedding_model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My Knowledge Base"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ region='"tor1"',
+ tags=["example string"],
+ vpc_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.knowledge_bases.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.knowledge_bases.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.knowledge_bases.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.knowledge_bases.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.knowledge_bases.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_update_with_all_params(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ database_id='"12345678-1234-1234-1234-123456789012"',
+ embedding_model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My Knowledge Base"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ tags=["example string"],
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_update(self, client: Gradient) -> None:
+ response = client.knowledge_bases.with_raw_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_update(self, client: Gradient) -> None:
+ with client.knowledge_bases.with_streaming_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_update(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ client.knowledge_bases.with_raw_response.update(
+ path_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.list()
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.knowledge_bases.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.knowledge_bases.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.knowledge_bases.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.knowledge_bases.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.knowledge_bases.with_raw_response.delete(
+ "",
+ )
+
+ @parametrize
+ def test_method_wait_for_database_success(self, client: Gradient) -> None:
+ """Test wait_for_database with successful database status transition."""
+ from unittest.mock import Mock
+
+ call_count = [0]
+
+ def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ call_count[0] += 1
+ response = Mock()
+ # Simulate CREATING -> ONLINE transition
+ response.database_status = "CREATING" if call_count[0] == 1 else "ONLINE"
+ return response
+
+ client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ result = client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=10.0,
+ poll_interval=0.1,
+ )
+
+ assert result.database_status == "ONLINE"
+ assert call_count[0] == 2
+
+ @parametrize
+ def test_method_wait_for_database_failed_state(self, client: Gradient) -> None:
+ """Test wait_for_database with failed database status."""
+ from unittest.mock import Mock
+
+ from gradient.resources.knowledge_bases import KnowledgeBaseDatabaseError
+
+ def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ response = Mock()
+ response.database_status = "UNHEALTHY"
+ return response
+
+ client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ with pytest.raises(KnowledgeBaseDatabaseError, match="UNHEALTHY"):
+ client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=10.0,
+ poll_interval=0.1,
+ )
+
+ @parametrize
+ def test_method_wait_for_database_timeout(self, client: Gradient) -> None:
+ """Test wait_for_database with timeout."""
+ from unittest.mock import Mock
+
+ from gradient.resources.knowledge_bases import KnowledgeBaseTimeoutError
+
+ def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ response = Mock()
+ response.database_status = "CREATING"
+ return response
+
+ client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ with pytest.raises(KnowledgeBaseTimeoutError):
+ client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=0.3,
+ poll_interval=0.1,
+ )
+
+ @parametrize
+ def test_method_wait_for_database_decommissioned(self, client: Gradient) -> None:
+ """Test wait_for_database with DECOMMISSIONED status."""
+ from unittest.mock import Mock
+
+ from gradient.resources.knowledge_bases import KnowledgeBaseDatabaseError
+
+ def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ response = Mock()
+ response.database_status = "DECOMMISSIONED"
+ return response
+
+ client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ with pytest.raises(KnowledgeBaseDatabaseError, match="DECOMMISSIONED"):
+ client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=10.0,
+ poll_interval=0.1,
+ )
+
+ @parametrize
+ def test_path_params_wait_for_database(self, client: Gradient) -> None:
+ """Test wait_for_database validates uuid parameter."""
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.knowledge_bases.wait_for_database(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_indexing_jobs(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list_indexing_jobs(self, client: Gradient) -> None:
+ response = client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list_indexing_jobs(self, client: Gradient) -> None:
+ with client.knowledge_bases.with_streaming_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_list_indexing_jobs(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ "",
+ )
+
+
+class TestAsyncKnowledgeBases:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.create()
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.create(
+ database_id='"12345678-1234-1234-1234-123456789012"',
+ datasources=[
+ {
+ "aws_data_source": {
+ "bucket_name": "example name",
+ "item_path": "example string",
+ "key_id": "123e4567-e89b-12d3-a456-426614174000",
+ "region": "example string",
+ "secret_key": "example string",
+ },
+ "bucket_name": "example name",
+ "bucket_region": "example string",
+ "chunking_algorithm": "CHUNKING_ALGORITHM_SECTION_BASED",
+ "chunking_options": {
+ "child_chunk_size": 350,
+ "max_chunk_size": 750,
+ "parent_chunk_size": 1000,
+ "semantic_threshold": 0.5,
+ },
+ "dropbox_data_source": {
+ "folder": "example string",
+ "refresh_token": "example string",
+ },
+ "file_upload_data_source": {
+ "original_file_name": "example name",
+ "size_in_bytes": "12345",
+ "stored_object_key": "example string",
+ },
+ "google_drive_data_source": {
+ "folder_id": "123e4567-e89b-12d3-a456-426614174000",
+ "refresh_token": "example string",
+ },
+ "item_path": "example string",
+ "spaces_data_source": {
+ "bucket_name": "example name",
+ "item_path": "example string",
+ "region": "example string",
+ },
+ "web_crawler_data_source": {
+ "base_url": "example string",
+ "crawling_option": "UNKNOWN",
+ "embed_media": True,
+ "exclude_tags": ["example string"],
+ },
+ }
+ ],
+ embedding_model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My Knowledge Base"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ region='"tor1"',
+ tags=["example string"],
+ vpc_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.knowledge_bases.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ database_id='"12345678-1234-1234-1234-123456789012"',
+ embedding_model_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"My Knowledge Base"',
+ project_id='"12345678-1234-1234-1234-123456789012"',
+ tags=["example string"],
+ body_uuid='"12345678-1234-1234-1234-123456789012"',
+ )
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.with_raw_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.with_streaming_response.update(
+ path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+ await async_client.knowledge_bases.with_raw_response.update(
+ path_uuid="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.list()
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.knowledge_bases.with_raw_response.delete(
+ "",
+ )
+
+ @parametrize
+ async def test_method_wait_for_database_success(self, async_client: AsyncGradient) -> None:
+ """Test async wait_for_database with successful database status transition."""
+ from unittest.mock import Mock
+
+ call_count = [0]
+
+ async def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ call_count[0] += 1
+ response = Mock()
+ # Simulate CREATING -> ONLINE transition
+ response.database_status = "CREATING" if call_count[0] == 1 else "ONLINE"
+ return response
+
+ async_client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ result = await async_client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=10.0,
+ poll_interval=0.1,
+ )
+
+ assert result.database_status == "ONLINE"
+ assert call_count[0] == 2
+
+ @parametrize
+ async def test_method_wait_for_database_failed_state(self, async_client: AsyncGradient) -> None:
+ """Test async wait_for_database with failed database status."""
+ from unittest.mock import Mock
+
+ from gradient.resources.knowledge_bases import KnowledgeBaseDatabaseError
+
+ async def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ response = Mock()
+ response.database_status = "UNHEALTHY"
+ return response
+
+ async_client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ with pytest.raises(KnowledgeBaseDatabaseError, match="UNHEALTHY"):
+ await async_client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=10.0,
+ poll_interval=0.1,
+ )
+
+ @parametrize
+ async def test_method_wait_for_database_timeout(self, async_client: AsyncGradient) -> None:
+ """Test async wait_for_database with timeout."""
+ from unittest.mock import Mock
+
+ from gradient.resources.knowledge_bases import KnowledgeBaseTimeoutError
+
+ async def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ response = Mock()
+ response.database_status = "CREATING"
+ return response
+
+ async_client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ with pytest.raises(KnowledgeBaseTimeoutError):
+ await async_client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=0.3,
+ poll_interval=0.1,
+ )
+
+ @parametrize
+ async def test_method_wait_for_database_decommissioned(self, async_client: AsyncGradient) -> None:
+ """Test async wait_for_database with DECOMMISSIONED status."""
+ from unittest.mock import Mock
+
+ from gradient.resources.knowledge_bases import KnowledgeBaseDatabaseError
+
+ async def mock_retrieve(uuid: str, **kwargs: object) -> Mock: # noqa: ARG001
+ response = Mock()
+ response.database_status = "DECOMMISSIONED"
+ return response
+
+ async_client.knowledge_bases.retrieve = mock_retrieve # type: ignore[method-assign]
+
+ with pytest.raises(KnowledgeBaseDatabaseError, match="DECOMMISSIONED"):
+ await async_client.knowledge_bases.wait_for_database(
+ "test-uuid",
+ timeout=10.0,
+ poll_interval=0.1,
+ )
+
+ @parametrize
+ async def test_path_params_wait_for_database(self, async_client: AsyncGradient) -> None:
+ """Test async wait_for_database validates uuid parameter."""
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.knowledge_bases.wait_for_database(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.with_streaming_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ "",
+ )
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index aa215415..d2dc075a 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -7,9 +7,9 @@
import pytest
+from gradient import Gradient, AsyncGradient
from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse
+from gradient.types import ModelListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -17,57 +17,26 @@
class TestModels:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- model = client.models.retrieve(
- "gpt-4o-mini",
- )
- assert_matches_type(Model, model, path=["response"])
+ def test_method_list(self, client: Gradient) -> None:
+ model = client.models.list()
+ assert_matches_type(ModelListResponse, model, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.models.with_raw_response.retrieve(
- "gpt-4o-mini",
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ model = client.models.list(
+ page=0,
+ per_page=0,
+ public_only=True,
+ usecases=["MODEL_USECASE_UNKNOWN"],
)
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.models.with_streaming_response.retrieve(
- "gpt-4o-mini",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- client.models.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- model = client.models.list()
assert_matches_type(ModelListResponse, model, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_raw_response_list(self, client: Gradient) -> None:
response = client.models.with_raw_response.list()
assert response.is_closed is True
@@ -75,9 +44,9 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
model = response.parse()
assert_matches_type(ModelListResponse, model, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_streaming_response_list(self, client: Gradient) -> None:
with client.models.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -87,103 +56,32 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- model = client.models.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.models.with_raw_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.models.with_streaming_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- client.models.with_raw_response.delete(
- "",
- )
-
class TestAsyncModels:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- model = await async_client.models.retrieve(
- "gpt-4o-mini",
- )
- assert_matches_type(Model, model, path=["response"])
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ model = await async_client.models.list()
+ assert_matches_type(ModelListResponse, model, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.models.with_raw_response.retrieve(
- "gpt-4o-mini",
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ model = await async_client.models.list(
+ page=0,
+ per_page=0,
+ public_only=True,
+ usecases=["MODEL_USECASE_UNKNOWN"],
)
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = await response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.models.with_streaming_response.retrieve(
- "gpt-4o-mini",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = await response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- await async_client.models.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- model = await async_client.models.list()
assert_matches_type(ModelListResponse, model, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
response = await async_client.models.with_raw_response.list()
assert response.is_closed is True
@@ -191,9 +89,9 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK)
model = await response.parse()
assert_matches_type(ModelListResponse, model, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
async with async_client.models.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -202,45 +100,3 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
assert_matches_type(ModelListResponse, model, path=["response"])
assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- model = await async_client.models.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.models.with_raw_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = await response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.models.with_streaming_response.delete(
- "ft:gpt-4o-mini:acemeco:suffix:abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = await response.parse()
- assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- await async_client.models.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py
deleted file mode 100644
index 79d34625..00000000
--- a/tests/api_resources/test_moderations.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import ModerationClassifyResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestModerations:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_classify(self, client: DigitaloceanGenaiSDK) -> None:
- moderation = client.moderations.classify(
- input="I want to kill them.",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_classify_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- moderation = client.moderations.classify(
- input="I want to kill them.",
- model="omni-moderation-2024-09-26",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_classify(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.moderations.with_raw_response.classify(
- input="I want to kill them.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- moderation = response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_classify(self, client: DigitaloceanGenaiSDK) -> None:
- with client.moderations.with_streaming_response.classify(
- input="I want to kill them.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- moderation = response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncModerations:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- moderation = await async_client.moderations.classify(
- input="I want to kill them.",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_classify_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- moderation = await async_client.moderations.classify(
- input="I want to kill them.",
- model="omni-moderation-2024-09-26",
- )
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.moderations.with_raw_response.classify(
- input="I want to kill them.",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- moderation = await response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.moderations.with_streaming_response.classify(
- input="I want to kill them.",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- moderation = await response.parse()
- assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_nfs.py b/tests/api_resources/test_nfs.py
new file mode 100644
index 00000000..9a09e99b
--- /dev/null
+++ b/tests/api_resources/test_nfs.py
@@ -0,0 +1,985 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import (
+ NfListResponse,
+ NfCreateResponse,
+ NfRetrieveResponse,
+ NfInitiateActionResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestNfs:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ nf = client.nfs.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ nf = client.nfs.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ performance_tier="standard",
+ )
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ nf = client.nfs.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
+ nf = client.nfs.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.retrieve(
+ nfs_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ nf = client.nfs.list()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ nf = client.nfs.list(
+ region="region",
+ )
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ nf = client.nfs.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert nf is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_delete_with_all_params(self, client: Gradient) -> None:
+ nf = client.nfs.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert nf is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert nf is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert nf is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.delete(
+ nfs_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_1(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_1(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"size_gib": 2048},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_1(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_1(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_1(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_2(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_2(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"name": "daily-backup"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_2(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_2(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_2(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_3(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_3(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_3(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_3(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_3(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_4(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_4(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_4(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_4(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_4(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_5(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_5(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"performance_tier": "standard"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_5(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_5(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_5(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+
+class TestAsyncNfs:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ performance_tier="standard",
+ )
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.retrieve(
+ nfs_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.list()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.list(
+ region="region",
+ )
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+ assert nf is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_delete_with_all_params(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert nf is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert nf is None
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert nf is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.delete(
+ nfs_id="",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"size_gib": 2048},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"name": "daily-backup"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_3(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_3(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_4(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"vpc_id": "vpc-id-123"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_4(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_5(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_5(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ params={"performance_tier": "standard"},
+ region="atl1",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_5(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_5(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_5(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ type="resize",
+ )
diff --git a/tests/api_resources/test_organization.py b/tests/api_resources/test_organization.py
deleted file mode 100644
index 844ed287..00000000
--- a/tests/api_resources/test_organization.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- UsageResponse,
- OrganizationListAuditLogsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestOrganization:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.get_costs(
- start_time=0,
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_get_costs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.get_costs(
- start_time=0,
- bucket_width="1d",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.with_raw_response.get_costs(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.with_streaming_response.get_costs(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.list_audit_logs()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_audit_logs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- organization = client.organization.list_audit_logs(
- actor_emails=["string"],
- actor_ids=["string"],
- after="after",
- before="before",
- effective_at={
- "gt": 0,
- "gte": 0,
- "lt": 0,
- "lte": 0,
- },
- event_types=["api_key.created"],
- limit=0,
- project_ids=["string"],
- resource_ids=["string"],
- )
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.organization.with_raw_response.list_audit_logs()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
- with client.organization.with_streaming_response.list_audit_logs() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncOrganization:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.get_costs(
- start_time=0,
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_get_costs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.get_costs(
- start_time=0,
- bucket_width="1d",
- end_time=0,
- group_by=["project_id"],
- limit=0,
- page="page",
- project_ids=["string"],
- )
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.with_raw_response.get_costs(
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = await response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.with_streaming_response.get_costs(
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = await response.parse()
- assert_matches_type(UsageResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.list_audit_logs()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_audit_logs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- organization = await async_client.organization.list_audit_logs(
- actor_emails=["string"],
- actor_ids=["string"],
- after="after",
- before="before",
- effective_at={
- "gt": 0,
- "gte": 0,
- "lt": 0,
- "lte": 0,
- },
- event_types=["api_key.created"],
- limit=0,
- project_ids=["string"],
- resource_ids=["string"],
- )
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.organization.with_raw_response.list_audit_logs()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- organization = await response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.organization.with_streaming_response.list_audit_logs() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- organization = await response.parse()
- assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_realtime.py b/tests/api_resources/test_realtime.py
deleted file mode 100644
index 15797ff9..00000000
--- a/tests/api_resources/test_realtime.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- RealtimeCreateSessionResponse,
- RealtimeCreateTranscriptionSessionResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRealtime:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_session(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_session()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_session(
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "model",
- "prompt": "prompt",
- },
- instructions="instructions",
- max_response_output_tokens=0,
- modalities=["text"],
- model="gpt-4o-realtime-preview",
- output_audio_format="pcm16",
- temperature=0,
- tool_choice="tool_choice",
- tools=[
- {
- "description": "description",
- "name": "name",
- "parameters": {},
- "type": "function",
- }
- ],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- voice="ash",
- )
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_session(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.realtime.with_raw_response.create_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_session(self, client: DigitaloceanGenaiSDK) -> None:
- with client.realtime.with_streaming_response.create_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_transcription_session()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_transcription_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- realtime = client.realtime.create_transcription_session(
- include=["string"],
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "gpt-4o-transcribe",
- "prompt": "prompt",
- },
- modalities=["text"],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- )
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.realtime.with_raw_response.create_transcription_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
- with client.realtime.with_streaming_response.create_transcription_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncRealtime:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- realtime = await async_client.realtime.create_session()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_session_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- realtime = await async_client.realtime.create_session(
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "model",
- "prompt": "prompt",
- },
- instructions="instructions",
- max_response_output_tokens=0,
- modalities=["text"],
- model="gpt-4o-realtime-preview",
- output_audio_format="pcm16",
- temperature=0,
- tool_choice="tool_choice",
- tools=[
- {
- "description": "description",
- "name": "name",
- "parameters": {},
- "type": "function",
- }
- ],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- voice="ash",
- )
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.realtime.with_raw_response.create_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.realtime.with_streaming_response.create_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- realtime = await async_client.realtime.create_transcription_session()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_transcription_session_with_all_params(
- self, async_client: AsyncDigitaloceanGenaiSDK
- ) -> None:
- realtime = await async_client.realtime.create_transcription_session(
- include=["string"],
- input_audio_format="pcm16",
- input_audio_noise_reduction={"type": "near_field"},
- input_audio_transcription={
- "language": "language",
- "model": "gpt-4o-transcribe",
- "prompt": "prompt",
- },
- modalities=["text"],
- turn_detection={
- "create_response": True,
- "eagerness": "low",
- "interrupt_response": True,
- "prefix_padding_ms": 0,
- "silence_duration_ms": 0,
- "threshold": 0,
- "type": "server_vad",
- },
- )
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.realtime.with_raw_response.create_transcription_session()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_transcription_session(
- self, async_client: AsyncDigitaloceanGenaiSDK
- ) -> None:
- async with async_client.realtime.with_streaming_response.create_transcription_session() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- realtime = await response.parse()
- assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py
new file mode 100644
index 00000000..1ba008bb
--- /dev/null
+++ b/tests/api_resources/test_regions.py
@@ -0,0 +1,98 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import RegionListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRegions:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ region = client.regions.list()
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ region = client.regions.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.regions.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ region = response.parse()
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.regions.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ region = response.parse()
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncRegions:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ region = await async_client.regions.list()
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ region = await async_client.regions.list(
+ page=1,
+ per_page=1,
+ )
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.regions.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ region = await response.parse()
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.regions.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ region = await response.parse()
+ assert_matches_type(RegionListResponse, region, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 4bd7e367..fc6cd4fe 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -7,12 +7,9 @@
import pytest
+from gradient import Gradient, AsyncGradient
from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- Response,
- ResponseListInputItemsResponse,
-)
+from gradient.types.shared import CreateResponseResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -20,460 +17,280 @@
class TestResponses:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_method_create_overload_1(self, client: Gradient) -> None:
response = client.responses.create(
- input="string",
- model="gpt-4o",
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
)
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
response = client.responses.create(
- input="string",
- model="gpt-4o",
- include=["file_search_call.results"],
- instructions="instructions",
- max_output_tokens=0,
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ instructions="You are a helpful assistant.",
+ max_output_tokens=1024,
+ max_tokens=1024,
metadata={"foo": "string"},
+ modalities=["text"],
parallel_tool_calls=True,
- previous_response_id="previous_response_id",
- reasoning={
- "effort": "low",
- "generate_summary": "concise",
- },
- store=True,
- stream=True,
+ stop="\n",
+ stream=False,
+ stream_options={"include_usage": True},
temperature=1,
- text={"format": {"type": "text"}},
tool_choice="none",
tools=[
{
- "type": "file_search",
- "vector_store_ids": ["string"],
- "filters": {
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- "max_num_results": 0,
- "ranking_options": {
- "ranker": "auto",
- "score_threshold": 0,
- },
+ "type": "function",
+ "description": "description",
+ "name": "name",
+ "parameters": {"foo": "bar"},
}
],
top_p=1,
- truncation="auto",
user="user-1234",
)
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
http_response = client.responses.with_raw_response.create(
- input="string",
- model="gpt-4o",
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
)
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
with client.responses.with_streaming_response.create(
- input="string",
- model="gpt-4o",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- include=["file_search_call.results"],
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- http_response = client.responses.with_raw_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.responses.with_streaming_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
) as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- client.responses.with_raw_response.retrieve(
- response_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert response is None
-
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- http_response = client.responses.with_raw_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert response is None
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.responses.with_streaming_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = http_response.parse()
- assert response is None
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- client.responses.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.list_input_items(
- response_id="response_id",
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ response_stream = client.responses.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
)
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ response_stream.response.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_list_input_items_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.responses.list_input_items(
- response_id="response_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ response_stream = client.responses.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
+ instructions="You are a helpful assistant.",
+ max_output_tokens=1024,
+ max_tokens=1024,
+ metadata={"foo": "string"},
+ modalities=["text"],
+ parallel_tool_calls=True,
+ stop="\n",
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "type": "function",
+ "description": "description",
+ "name": "name",
+ "parameters": {"foo": "bar"},
+ }
+ ],
+ top_p=1,
+ user="user-1234",
)
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ response_stream.response.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_raw_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- http_response = client.responses.with_raw_response.list_input_items(
- response_id="response_id",
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.responses.with_raw_response.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
)
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_streaming_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- with client.responses.with_streaming_response.list_input_items(
- response_id="response_id",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.responses.with_streaming_response.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assert cast(Any, http_response.is_closed) is True
+ stream = response.parse()
+ stream.close()
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- client.responses.with_raw_response.list_input_items(
- response_id="",
- )
+ assert cast(Any, response.is_closed) is True
class TestAsyncResponses:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
response = await async_client.responses.create(
- input="string",
- model="gpt-4o",
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
)
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
response = await async_client.responses.create(
- input="string",
- model="gpt-4o",
- include=["file_search_call.results"],
- instructions="instructions",
- max_output_tokens=0,
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ instructions="You are a helpful assistant.",
+ max_output_tokens=1024,
+ max_tokens=1024,
metadata={"foo": "string"},
+ modalities=["text"],
parallel_tool_calls=True,
- previous_response_id="previous_response_id",
- reasoning={
- "effort": "low",
- "generate_summary": "concise",
- },
- store=True,
- stream=True,
+ stop="\n",
+ stream=False,
+ stream_options={"include_usage": True},
temperature=1,
- text={"format": {"type": "text"}},
tool_choice="none",
tools=[
{
- "type": "file_search",
- "vector_store_ids": ["string"],
- "filters": {
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- "max_num_results": 0,
- "ranking_options": {
- "ranker": "auto",
- "score_threshold": 0,
- },
+ "type": "function",
+ "description": "description",
+ "name": "name",
+ "parameters": {"foo": "bar"},
}
],
top_p=1,
- truncation="auto",
user="user-1234",
)
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
http_response = await async_client.responses.with_raw_response.create(
- input="string",
- model="gpt-4o",
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
)
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
async with async_client.responses.with_streaming_response.create(
- input="string",
- model="gpt-4o",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- include=["file_search_call.results"],
- )
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- http_response = await async_client.responses.with_raw_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.responses.with_streaming_response.retrieve(
- response_id="resp_677efb5139a88190b512bc3fef8e535d",
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
) as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = await http_response.parse()
- assert_matches_type(Response, response, path=["response"])
+ assert_matches_type(CreateResponseResponse, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- await async_client.responses.with_raw_response.retrieve(
- response_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
- assert response is None
-
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- http_response = await async_client.responses.with_raw_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- )
-
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert response is None
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.responses.with_streaming_response.delete(
- "resp_677efb5139a88190b512bc3fef8e535d",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = await http_response.parse()
- assert response is None
-
- assert cast(Any, http_response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- await async_client.responses.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.list_input_items(
- response_id="response_id",
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response_stream = await async_client.responses.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
)
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ await response_stream.response.aclose()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_list_input_items_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.responses.list_input_items(
- response_id="response_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ response_stream = await async_client.responses.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
+ instructions="You are a helpful assistant.",
+ max_output_tokens=1024,
+ max_tokens=1024,
+ metadata={"foo": "string"},
+ modalities=["text"],
+ parallel_tool_calls=True,
+ stop="\n",
+ stream_options={"include_usage": True},
+ temperature=1,
+ tool_choice="none",
+ tools=[
+ {
+ "type": "function",
+ "description": "description",
+ "name": "name",
+ "parameters": {"foo": "bar"},
+ }
+ ],
+ top_p=1,
+ user="user-1234",
)
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ await response_stream.response.aclose()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_raw_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- http_response = await async_client.responses.with_raw_response.list_input_items(
- response_id="response_id",
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.responses.with_raw_response.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
)
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = await response.parse()
+ await stream.close()
- @pytest.mark.skip()
+ @pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_streaming_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.responses.with_streaming_response.list_input_items(
- response_id="response_id",
- ) as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- response = await http_response.parse()
- assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.responses.with_streaming_response.create(
+ input="Tell me a three-sentence bedtime story about a unicorn.",
+ model="llama3-8b-instruct",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- assert cast(Any, http_response.is_closed) is True
+ stream = await response.parse()
+ await stream.close()
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
- await async_client.responses.with_raw_response.list_input_items(
- response_id="",
- )
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_retrieve.py b/tests/api_resources/test_retrieve.py
new file mode 100644
index 00000000..8b5ed252
--- /dev/null
+++ b/tests/api_resources/test_retrieve.py
@@ -0,0 +1,192 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import RetrieveDocumentsResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRetrieve:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_documents(self, client: Gradient) -> None:
+ retrieve = client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_method_documents_with_all_params(self, client: Gradient) -> None:
+ retrieve = client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ alpha=0.75,
+ filters={
+ "must": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "must_not": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "should": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ },
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_raw_response_documents(self, client: Gradient) -> None:
+ response = client.retrieve.with_raw_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ retrieve = response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_streaming_response_documents(self, client: Gradient) -> None:
+ with client.retrieve.with_streaming_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ retrieve = response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ def test_path_params_documents(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_id` but received ''"):
+ client.retrieve.with_raw_response.documents(
+ knowledge_base_id="",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+
+
+class TestAsyncRetrieve:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_documents(self, async_client: AsyncGradient) -> None:
+ retrieve = await async_client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_method_documents_with_all_params(self, async_client: AsyncGradient) -> None:
+ retrieve = await async_client.retrieve.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ alpha=0.75,
+ filters={
+ "must": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "must_not": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ "should": [
+ {
+ "field": "category",
+ "operator": "eq",
+ "value": "documentation",
+ }
+ ],
+ },
+ )
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_raw_response_documents(self, async_client: AsyncGradient) -> None:
+ response = await async_client.retrieve.with_raw_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ retrieve = await response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_streaming_response_documents(self, async_client: AsyncGradient) -> None:
+ async with async_client.retrieve.with_streaming_response.documents(
+ knowledge_base_id="550e8400-e29b-41d4-a716-446655440000",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ retrieve = await response.parse()
+ assert_matches_type(RetrieveDocumentsResponse, retrieve, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Mock server tests are disabled")
+ @parametrize
+ async def test_path_params_documents(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_id` but received ''"):
+ await async_client.retrieve.with_raw_response.documents(
+ knowledge_base_id="",
+ num_results=5,
+ query="What are the best practices for deploying machine learning models?",
+ )
diff --git a/tests/api_resources/test_threads.py b/tests/api_resources/test_threads.py
deleted file mode 100644
index cca5e067..00000000
--- a/tests/api_resources/test_threads.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import ThreadObject, ThreadDeleteResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestThreads:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.create()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.create(
- messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.retrieve(
- "thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.retrieve(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.retrieve(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.update(
- thread_id="thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.update(
- thread_id="thread_id",
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.update(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.update(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.with_raw_response.update(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- thread = client.threads.delete(
- "thread_id",
- )
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.with_raw_response.delete(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.with_streaming_response.delete(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.with_raw_response.delete(
- "",
- )
-
-
-class TestAsyncThreads:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.create()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.create(
- messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.retrieve(
- "thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.retrieve(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.retrieve(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.update(
- thread_id="thread_id",
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.update(
- thread_id="thread_id",
- metadata={"foo": "string"},
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- )
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.update(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.update(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadObject, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.with_raw_response.update(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- thread = await async_client.threads.delete(
- "thread_id",
- )
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.with_raw_response.delete(
- "thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- thread = await response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.with_streaming_response.delete(
- "thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- thread = await response.parse()
- assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.with_raw_response.delete(
- "",
- )
diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py
deleted file mode 100644
index 35f52730..00000000
--- a/tests/api_resources/test_uploads.py
+++ /dev/null
@@ -1,399 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- Upload,
- UploadAddPartResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUploads:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_add_part(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- client.uploads.with_raw_response.add_part(
- upload_id="",
- data=b"raw file contents",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.cancel(
- "upload_abc123",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.cancel(
- "upload_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.cancel(
- "upload_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- client.uploads.with_raw_response.cancel(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_complete(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_complete_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- upload = client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- md5="md5",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_complete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.uploads.with_raw_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_complete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.uploads.with_streaming_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_complete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- client.uploads.with_raw_response.complete(
- upload_id="",
- part_ids=["string"],
- )
-
-
-class TestAsyncUploads:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.create(
- bytes=0,
- filename="filename",
- mime_type="mime_type",
- purpose="assistants",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.add_part(
- upload_id="upload_abc123",
- data=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- await async_client.uploads.with_raw_response.add_part(
- upload_id="",
- data=b"raw file contents",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.cancel(
- "upload_abc123",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.cancel(
- "upload_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.cancel(
- "upload_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- await async_client.uploads.with_raw_response.cancel(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_complete_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- upload = await async_client.uploads.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- md5="md5",
- )
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.uploads.with_raw_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.uploads.with_streaming_response.complete(
- upload_id="upload_abc123",
- part_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- upload = await response.parse()
- assert_matches_type(Upload, upload, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
- await async_client.uploads.with_raw_response.complete(
- upload_id="",
- part_ids=["string"],
- )
diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py
deleted file mode 100644
index 1c8b5fb0..00000000
--- a/tests/api_resources/test_vector_stores.py
+++ /dev/null
@@ -1,603 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
- VectorStoreObject,
- VectorStoreListResponse,
- VectorStoreDeleteResponse,
- VectorStoreSearchResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestVectorStores:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.create()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.create(
- chunking_strategy={"type": "auto"},
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- file_ids=["string"],
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.retrieve(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.retrieve(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.retrieve(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.update(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.update(
- vector_store_id="vector_store_id",
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.update(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.update(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.update(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.list()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.delete(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.delete(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.delete(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_search(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_search_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- vector_store = client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- filters={
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- max_num_results=1,
- ranking_options={
- "ranker": "auto",
- "score_threshold": 0,
- },
- rewrite_query=True,
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_search(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.with_raw_response.search(
- vector_store_id="vs_abc123",
- query="string",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_search(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.with_streaming_response.search(
- vector_store_id="vs_abc123",
- query="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_search(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.with_raw_response.search(
- vector_store_id="",
- query="string",
- )
-
-
-class TestAsyncVectorStores:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.create()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.create(
- chunking_strategy={"type": "auto"},
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- file_ids=["string"],
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.retrieve(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.retrieve(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.retrieve(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.update(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.update(
- vector_store_id="vector_store_id",
- expires_after={
- "anchor": "last_active_at",
- "days": 1,
- },
- metadata={"foo": "string"},
- name="name",
- )
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.update(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.update(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.update(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.list()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.list(
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.delete(
- "vector_store_id",
- )
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.delete(
- "vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.delete(
- "vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_search_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- vector_store = await async_client.vector_stores.search(
- vector_store_id="vs_abc123",
- query="string",
- filters={
- "key": "key",
- "type": "eq",
- "value": "string",
- },
- max_num_results=1,
- ranking_options={
- "ranker": "auto",
- "score_threshold": 0,
- },
- rewrite_query=True,
- )
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.with_raw_response.search(
- vector_store_id="vs_abc123",
- query="string",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- vector_store = await response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.with_streaming_response.search(
- vector_store_id="vs_abc123",
- query="string",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- vector_store = await response.parse()
- assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.with_raw_response.search(
- vector_store_id="",
- query="string",
- )
diff --git a/tests/api_resources/threads/runs/test_steps.py b/tests/api_resources/threads/runs/test_steps.py
deleted file mode 100644
index e972e952..00000000
--- a/tests/api_resources/threads/runs/test_steps.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads.runs import (
- RunStepObject,
- StepListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestSteps:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.steps.with_streaming_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="",
- run_id="run_id",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
- client.threads.runs.steps.with_raw_response.retrieve(
- step_id="",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- step = client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- after="after",
- before="before",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- limit=0,
- order="asc",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.steps.with_streaming_response.list(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.steps.with_raw_response.list(
- run_id="",
- thread_id="thread_id",
- )
-
-
-class TestAsyncSteps:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- )
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = await response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.steps.with_streaming_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = await response.parse()
- assert_matches_type(RunStepObject, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="",
- run_id="run_id",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="step_id",
- thread_id="thread_id",
- run_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.retrieve(
- step_id="",
- thread_id="thread_id",
- run_id="run_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- step = await async_client.threads.runs.steps.list(
- run_id="run_id",
- thread_id="thread_id",
- after="after",
- before="before",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- limit=0,
- order="asc",
- )
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- step = await response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.steps.with_streaming_response.list(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- step = await response.parse()
- assert_matches_type(StepListResponse, step, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.list(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.steps.with_raw_response.list(
- run_id="",
- thread_id="thread_id",
- )
diff --git a/tests/api_resources/threads/test_messages.py b/tests/api_resources/threads/test_messages.py
deleted file mode 100644
index e1aaf51e..00000000
--- a/tests/api_resources/threads/test_messages.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads import (
- MessageObject,
- MessageListResponse,
- MessageDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestMessages:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- attachments=[
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.create(
- thread_id="",
- content="string",
- role="user",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.threads.messages.with_raw_response.retrieve(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.update(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.threads.messages.with_raw_response.update(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.list(
- thread_id="thread_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- run_id="run_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- message = client.threads.messages.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.messages.with_streaming_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- client.threads.messages.with_raw_response.delete(
- message_id="",
- thread_id="thread_id",
- )
-
-
-class TestAsyncMessages:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.create(
- thread_id="thread_id",
- content="string",
- role="user",
- attachments=[
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.create(
- thread_id="thread_id",
- content="string",
- role="user",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.create(
- thread_id="",
- content="string",
- role="user",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.retrieve(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.retrieve(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.threads.messages.with_raw_response.retrieve(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.update(
- message_id="message_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.update(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageObject, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.update(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.threads.messages.with_raw_response.update(
- message_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.list(
- thread_id="thread_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- run_id="run_id",
- )
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageListResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- message = await async_client.threads.messages.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = await response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.messages.with_streaming_response.delete(
- message_id="message_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.messages.with_raw_response.delete(
- message_id="message_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
- await async_client.threads.messages.with_raw_response.delete(
- message_id="",
- thread_id="thread_id",
- )
diff --git a/tests/api_resources/threads/test_runs.py b/tests/api_resources/threads/test_runs.py
deleted file mode 100644
index 59716b5e..00000000
--- a/tests/api_resources/threads/test_runs.py
+++ /dev/null
@@ -1,967 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads import (
- RunObject,
- RunListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRuns:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create(
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create(
- assistant_id="assistant_id",
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- response_format="auto",
- stream=True,
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.create(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.create(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.retrieve(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.update(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.update(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.list(
- thread_id="thread_id",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.cancel(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_run_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- stream=True,
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create_run(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.create_run(
- thread_id="",
- assistant_id="assistant_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_submit_tool_outputs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- run = client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[
- {
- "output": "output",
- "tool_call_id": "tool_call_id",
- }
- ],
- stream=True,
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- with client.threads.runs.with_streaming_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="",
- tool_outputs=[{}],
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
-
-
-class TestAsyncRuns:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create(
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create(
- assistant_id="assistant_id",
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- response_format="auto",
- stream=True,
- temperature=1,
- thread={
- "messages": [
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- "metadata": {"foo": "string"},
- "tool_resources": {
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {
- "vector_store_ids": ["string"],
- "vector_stores": [
- {
- "chunking_strategy": {"type": "auto"},
- "file_ids": ["string"],
- "metadata": {"foo": "string"},
- }
- ],
- },
- },
- },
- tool_choice="none",
- tool_resources={
- "code_interpreter": {"file_ids": ["string"]},
- "file_search": {"vector_store_ids": ["string"]},
- },
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.create(
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.create(
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.retrieve(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.retrieve(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.retrieve(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.update(
- run_id="run_id",
- thread_id="thread_id",
- metadata={"foo": "string"},
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.update(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.update(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.update(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.list(
- thread_id="thread_id",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.list(
- thread_id="thread_id",
- after="after",
- before="before",
- limit=0,
- order="asc",
- )
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.list(
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.list(
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunListResponse, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.list(
- thread_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.cancel(
- run_id="run_id",
- thread_id="thread_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.cancel(
- run_id="run_id",
- thread_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.cancel(
- run_id="",
- thread_id="thread_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_run_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- include=["step_details.tool_calls[*].file_search.results[*].content"],
- additional_instructions="additional_instructions",
- additional_messages=[
- {
- "content": "string",
- "role": "user",
- "attachments": [
- {
- "file_id": "file_id",
- "tools": [{"type": "code_interpreter"}],
- }
- ],
- "metadata": {"foo": "string"},
- }
- ],
- instructions="instructions",
- max_completion_tokens=256,
- max_prompt_tokens=256,
- metadata={"foo": "string"},
- model="gpt-4o",
- parallel_tool_calls=True,
- reasoning_effort="low",
- response_format="auto",
- stream=True,
- temperature=1,
- tool_choice="none",
- tools=[{"type": "code_interpreter"}],
- top_p=1,
- truncation_strategy={
- "type": "auto",
- "last_messages": 1,
- },
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.create_run(
- thread_id="thread_id",
- assistant_id="assistant_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.create_run(
- thread_id="",
- assistant_id="assistant_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_submit_tool_outputs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- run = await async_client.threads.runs.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[
- {
- "output": "output",
- "tool_call_id": "tool_call_id",
- }
- ],
- stream=True,
- )
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.threads.runs.with_streaming_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="thread_id",
- tool_outputs=[{}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(RunObject, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
- await async_client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="run_id",
- thread_id="",
- tool_outputs=[{}],
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.threads.runs.with_raw_response.submit_tool_outputs(
- run_id="",
- thread_id="thread_id",
- tool_outputs=[{}],
- )
diff --git a/tests/api_resources/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py
deleted file mode 100644
index 47897412..00000000
--- a/tests/api_resources/vector_stores/test_file_batches.py
+++ /dev/null
@@ -1,479 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.vector_stores import (
- VectorStoreFileBatchObject,
- ListVectorStoreFilesResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFileBatches:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="",
- file_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_files_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file_batch = client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.file_batches.with_streaming_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_files(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="",
- vector_store_id="vector_store_id",
- )
-
-
-class TestAsyncFileBatches:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_ids=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.create(
- vector_store_id="",
- file_ids=["string"],
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="vsfb_abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.retrieve(
- batch_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.cancel(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.cancel(
- batch_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_files_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file_batch = await async_client.vector_stores.file_batches.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file_batch = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.list_files(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file_batch = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="batch_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.list_files(
- batch_id="",
- vector_store_id="vector_store_id",
- )
diff --git a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py
deleted file mode 100644
index b93fe1b4..00000000
--- a/tests/api_resources/vector_stores/test_files.py
+++ /dev/null
@@ -1,677 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.vector_stores import (
- FileDeleteResponse,
- VectorStoreFileObject,
- FileRetrieveContentResponse,
- ListVectorStoreFilesResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFiles:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.create(
- vector_store_id="",
- file_id="file_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve(
- file_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="",
- attributes={"foo": "string"},
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.update(
- file_id="",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.list(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.list(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.list(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.delete(
- file_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- file = client.vector_stores.files.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- response = client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with client.vector_stores.files.with_streaming_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="",
- vector_store_id="vs_abc123",
- )
-
-
-class TestAsyncFiles:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- attributes={"foo": "string"},
- chunking_strategy={"type": "auto"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.create(
- vector_store_id="vs_abc123",
- file_id="file_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.create(
- vector_store_id="",
- file_id="file_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.retrieve(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve(
- file_id="",
- vector_store_id="vs_abc123",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.update(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(VectorStoreFileObject, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.update(
- file_id="file-abc123",
- vector_store_id="",
- attributes={"foo": "string"},
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.update(
- file_id="",
- vector_store_id="vs_abc123",
- attributes={"foo": "string"},
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.list(
- vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="in_progress",
- limit=0,
- order="asc",
- )
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.list(
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.list(
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.list(
- vector_store_id="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.delete(
- file_id="file_id",
- vector_store_id="vector_store_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileDeleteResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.delete(
- file_id="file_id",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.delete(
- file_id="",
- vector_store_id="vector_store_id",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- file = await async_client.vector_stores.files.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- response = await async_client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- file = await response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- async with async_client.vector_stores.files.with_streaming_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="vs_abc123",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- file = await response.parse()
- assert_matches_type(FileRetrieveContentResponse, file, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="file-abc123",
- vector_store_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
- await async_client.vector_stores.files.with_raw_response.retrieve_content(
- file_id="",
- vector_store_id="vs_abc123",
- )
diff --git a/tests/conftest.py b/tests/conftest.py
index abd9aa51..3915c92b 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,20 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
from __future__ import annotations
import os
import logging
from typing import TYPE_CHECKING, Iterator, AsyncIterator
+import httpx
import pytest
from pytest_asyncio import is_async_test
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
+from gradient import Gradient, AsyncGradient, DefaultAioHttpClient
+from gradient._utils import is_dict
if TYPE_CHECKING:
- from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage]
+ from _pytest.fixtures import (
+ FixtureRequest, # pyright: ignore[reportPrivateImportUsage]
+ )
pytest.register_assert_rewrite("tests.utils")
-logging.getLogger("digitalocean_genai_sdk").setLevel(logging.DEBUG)
+logging.getLogger("gradient").setLevel(logging.DEBUG)
# automatically add `pytest.mark.asyncio()` to all of our async tests
@@ -25,29 +31,69 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
for async_test in pytest_asyncio_tests:
async_test.add_marker(session_scope_marker, append=False)
+ # We skip tests that use both the aiohttp client and respx_mock as respx_mock
+ # doesn't support custom transports.
+ for item in items:
+ if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames:
+ continue
+
+ if not hasattr(item, "callspec"):
+ continue
+
+ async_client_param = item.callspec.params.get("async_client")
+ if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp":
+ item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock"))
+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-api_key = "My API Key"
+access_token = "My Access Token"
+model_access_key = "My Model Access Key"
+agent_access_key = "My Agent Access Key"
@pytest.fixture(scope="session")
-def client(request: FixtureRequest) -> Iterator[DigitaloceanGenaiSDK]:
+def client(request: FixtureRequest) -> Iterator[Gradient]:
strict = getattr(request, "param", True)
if not isinstance(strict, bool):
raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
- with DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client:
+ with Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=strict,
+ ) as client:
yield client
@pytest.fixture(scope="session")
-async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncDigitaloceanGenaiSDK]:
- strict = getattr(request, "param", True)
- if not isinstance(strict, bool):
- raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
-
- async with AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=strict
+async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradient]:
+ param = getattr(request, "param", True)
+
+ # defaults
+ strict = True
+ http_client: None | httpx.AsyncClient = None
+
+ if isinstance(param, bool):
+ strict = param
+ elif is_dict(param):
+ strict = param.get("strict", True)
+ assert isinstance(strict, bool)
+
+ http_client_type = param.get("http_client", "httpx")
+ if http_client_type == "aiohttp":
+ http_client = DefaultAioHttpClient()
+ else:
+ raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict")
+
+ async with AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=strict,
+ http_client=http_client,
) as client:
yield client
diff --git a/tests/test_client.py b/tests/test_client.py
index c13403e3..f0605826 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -6,42 +6,47 @@
import os
import sys
import json
-import time
import asyncio
import inspect
-import subprocess
+import dataclasses
import tracemalloc
-from typing import Any, Union, cast
-from textwrap import dedent
+from typing import Any, Union, TypeVar, Callable, Iterable, Iterator, Optional, Coroutine, cast
from unittest import mock
-from typing_extensions import Literal
+from typing_extensions import Literal, AsyncIterator, override
import httpx
import pytest
from respx import MockRouter
from pydantic import ValidationError
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK, APIResponseValidationError
-from digitalocean_genai_sdk._types import Omit
-from digitalocean_genai_sdk._models import BaseModel, FinalRequestOptions
-from digitalocean_genai_sdk._constants import RAW_RESPONSE_HEADER
-from digitalocean_genai_sdk._exceptions import (
+from gradient import Gradient, AsyncGradient, APIResponseValidationError
+from gradient._types import Omit
+from gradient._utils import asyncify
+from gradient._models import BaseModel, FinalRequestOptions
+from gradient._streaming import Stream, AsyncStream
+from gradient._exceptions import (
APIStatusError,
APITimeoutError,
- DigitaloceanGenaiSDKError,
APIResponseValidationError,
)
-from digitalocean_genai_sdk._base_client import (
+from gradient._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
+ OtherPlatform,
+ DefaultHttpxClient,
+ DefaultAsyncHttpxClient,
+ get_platform,
make_request_options,
)
from .utils import update_env
+T = TypeVar("T")
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-api_key = "My API Key"
+access_token = "My Access Token"
+model_access_key = "My Model Access Key"
+agent_access_key = "My Agent Access Key"
def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]:
@@ -54,7 +59,58 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float:
return 0.1
-def _get_open_connections(client: DigitaloceanGenaiSDK | AsyncDigitaloceanGenaiSDK) -> int:
+def mirror_request_content(request: httpx.Request) -> httpx.Response:
+ return httpx.Response(200, content=request.content)
+
+
+# note: we can't use the httpx.MockTransport class as it consumes the request
+# body itself, which means we can't test that the body is read lazily
+class MockTransport(httpx.BaseTransport, httpx.AsyncBaseTransport):
+ def __init__(
+ self,
+ handler: Callable[[httpx.Request], httpx.Response]
+ | Callable[[httpx.Request], Coroutine[Any, Any, httpx.Response]],
+ ) -> None:
+ self.handler = handler
+
+ @override
+ def handle_request(
+ self,
+ request: httpx.Request,
+ ) -> httpx.Response:
+ assert not inspect.iscoroutinefunction(self.handler), "handler must not be a coroutine function"
+ assert inspect.isfunction(self.handler), "handler must be a function"
+ return self.handler(request)
+
+ @override
+ async def handle_async_request(
+ self,
+ request: httpx.Request,
+ ) -> httpx.Response:
+ assert inspect.iscoroutinefunction(self.handler), "handler must be a coroutine function"
+ return await self.handler(request)
+
+
+@dataclasses.dataclass
+class Counter:
+ value: int = 0
+
+
+def _make_sync_iterator(iterable: Iterable[T], counter: Optional[Counter] = None) -> Iterator[T]:
+ for item in iterable:
+ if counter:
+ counter.value += 1
+ yield item
+
+
+async def _make_async_iterator(iterable: Iterable[T], counter: Optional[Counter] = None) -> AsyncIterator[T]:
+ for item in iterable:
+ if counter:
+ counter.value += 1
+ yield item
+
+
+def _get_open_connections(client: Gradient | AsyncGradient) -> int:
transport = client._client._transport
assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport)
@@ -62,56 +118,77 @@ def _get_open_connections(client: DigitaloceanGenaiSDK | AsyncDigitaloceanGenaiS
return len(pool._requests)
-class TestDigitaloceanGenaiSDK:
- client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
-
+class TestGradient:
@pytest.mark.respx(base_url=base_url)
- def test_raw_response(self, respx_mock: MockRouter) -> None:
+ def test_raw_response(self, respx_mock: MockRouter, client: Gradient) -> None:
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- response = self.client.post("/foo", cast_to=httpx.Response)
+ response = client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
@pytest.mark.respx(base_url=base_url)
- def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
+ def test_raw_response_for_binary(self, respx_mock: MockRouter, client: Gradient) -> None:
respx_mock.post("/foo").mock(
- return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}')
+ return_value=httpx.Response(
+ 200,
+ headers={"Content-Type": "application/binary"},
+ content='{"foo": "bar"}',
+ )
)
- response = self.client.post("/foo", cast_to=httpx.Response)
+ response = client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
- def test_copy(self) -> None:
- copied = self.client.copy()
- assert id(copied) != id(self.client)
+ def test_copy(self, client: Gradient) -> None:
+ copied = client.copy()
+ assert id(copied) != id(client)
+
+ copied = client.copy(access_token="another My Access Token")
+ assert copied.access_token == "another My Access Token"
+ assert client.access_token == "My Access Token"
+
+ copied = client.copy(model_access_key="another My Model Access Key")
+ assert copied.model_access_key == "another My Model Access Key"
+ assert client.model_access_key == "My Model Access Key"
- copied = self.client.copy(api_key="another My API Key")
- assert copied.api_key == "another My API Key"
- assert self.client.api_key == "My API Key"
+ copied = client.copy(agent_access_key="another My Agent Access Key")
+ assert copied.agent_access_key == "another My Agent Access Key"
+ assert client.agent_access_key == "My Agent Access Key"
def test_copy_default_options(self) -> None:
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
# options that have a default are overridden correctly
- copied = self.client.copy(max_retries=7)
+ copied = client.copy(max_retries=7)
assert copied.max_retries == 7
- assert self.client.max_retries == 2
+ assert client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
- assert isinstance(self.client.timeout, httpx.Timeout)
- copied = self.client.copy(timeout=None)
+ assert isinstance(client.timeout, httpx.Timeout)
+ copied = client.copy(timeout=None)
assert copied.timeout is None
- assert isinstance(self.client.timeout, httpx.Timeout)
+ assert isinstance(client.timeout, httpx.Timeout)
def test_copy_default_headers(self) -> None:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
assert client.default_headers["X-Foo"] == "bar"
@@ -142,10 +219,16 @@ def test_copy_default_headers(self) -> None:
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
+ client.close()
def test_copy_default_query(self) -> None:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_query={"foo": "bar"},
)
assert _get_params(client)["foo"] == "bar"
@@ -179,13 +262,15 @@ def test_copy_default_query(self) -> None:
):
client.copy(set_default_query={}, default_query={"foo": "Bar"})
- def test_copy_signature(self) -> None:
+ client.close()
+
+ def test_copy_signature(self, client: Gradient) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
- self.client.__init__, # type: ignore[misc]
+ client.__init__, # type: ignore[misc]
)
- copy_signature = inspect.signature(self.client.copy)
+ copy_signature = inspect.signature(client.copy)
exclude_params = {"transport", "proxies", "_strict_response_validation"}
for name in init_signature.parameters.keys():
@@ -195,12 +280,22 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ @pytest.mark.skipif(
+ sys.version_info >= (3, 10),
+ reason="fails because of a memory leak that started from 3.12",
+ )
def test_copy_build_request(self) -> None:
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
options = FinalRequestOptions(method="get", url="/foo")
def build_request(options: FinalRequestOptions) -> None:
- client = self.client.copy()
- client._build_request(options)
+ client_copy = client.copy()
+ client_copy._build_request(options)
# ensure that the machinery is warmed up before tracing starts.
build_request(options)
@@ -235,10 +330,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
# to_raw_response_wrapper leaks through the @functools.wraps() decorator.
#
# removing the decorator fixes the leak for reasons we don't understand.
- "digitalocean_genai_sdk/_legacy_response.py",
- "digitalocean_genai_sdk/_response.py",
+ "gradient/_legacy_response.py",
+ "gradient/_response.py",
# pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
- "digitalocean_genai_sdk/_compat.py",
+ "gradient/_compat.py",
# Standard library leaks we don't care about.
"/logging/__init__.py",
]
@@ -252,106 +347,176 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
add_leak(leaks, diff)
if leaks:
for leak in leaks:
- print("MEMORY LEAK:", leak)
+ print("MEMORY LEAK:", leak) # noqa: T201
for frame in leak.traceback:
- print(frame)
+ print(frame) # noqa: T201
raise AssertionError()
- def test_request_timeout(self) -> None:
- request = self.client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ def test_request_timeout(self, client: Gradient) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
- request = self.client._build_request(
- FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))
- )
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(100.0)
def test_client_timeout_option(self) -> None:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0)
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ timeout=httpx.Timeout(0),
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(0)
+ client.close()
+
def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
with httpx.Client(timeout=None) as http_client:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(None)
+ client.close()
+
# no timeout given to the httpx client should not use the httpx default
with httpx.Client() as http_client:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
+ client.close()
+
# explicitly passing the default timeout currently results in it being ignored
with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT # our default
+ client.close()
+
async def test_invalid_http_client(self) -> None:
with pytest.raises(TypeError, match="Invalid `http_client` arg"):
async with httpx.AsyncClient() as http_client:
- DigitaloceanGenaiSDK(
+ Gradient(
base_url=base_url,
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
def test_default_headers_option(self) -> None:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ test_client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
- client2 = DigitaloceanGenaiSDK(
+ test_client2 = Gradient(
base_url=base_url,
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
"X-Stainless-Lang": "my-overriding-header",
},
)
- request = client2._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
+ test_client.close()
+ test_client2.close()
+
def test_validate_headers(self) -> None:
- client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
- assert request.headers.get("Authorization") == f"Bearer {api_key}"
+ assert request.headers.get("Authorization") == f"Bearer {access_token}"
+
+ with update_env(
+ **{
+ "DIGITALOCEAN_ACCESS_TOKEN": Omit(),
+ "MODEL_ACCESS_KEY": Omit(),
+ "AGENT_ACCESS_KEY": Omit(),
+ }
+ ):
+ client2 = Gradient(
+ base_url=base_url,
+ access_token=None,
+ model_access_key=None,
+ agent_access_key=None,
+ _strict_response_validation=True,
+ )
+
+ with pytest.raises(
+ TypeError,
+ match="Could not resolve authentication method. Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted",
+ ):
+ client2._build_request(FinalRequestOptions(method="get", url="/foo"))
- with pytest.raises(DigitaloceanGenaiSDKError):
- with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}):
- client2 = DigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
+ request2 = client2._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()})
+ )
+ assert request2.headers.get("Authorization") is None
def test_default_query_option(self) -> None:
- client = DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_query={"query_param": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
@@ -367,8 +532,10 @@ def test_default_query_option(self) -> None:
url = httpx.URL(request.url)
assert dict(url.params) == {"foo": "baz", "query_param": "overridden"}
- def test_request_extra_json(self) -> None:
- request = self.client._build_request(
+ client.close()
+
+ def test_request_extra_json(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -379,7 +546,7 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": False}
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -390,7 +557,7 @@ def test_request_extra_json(self) -> None:
assert data == {"baz": False}
# `extra_json` takes priority over `json_data` when keys clash
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -401,8 +568,8 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
- def test_request_extra_headers(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_headers(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -412,7 +579,7 @@ def test_request_extra_headers(self) -> None:
assert request.headers.get("X-Foo") == "Foo"
# `extra_headers` takes priority over `default_headers` when keys clash
- request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request(
+ request = client.with_options(default_headers={"X-Bar": "true"})._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -423,8 +590,8 @@ def test_request_extra_headers(self) -> None:
)
assert request.headers.get("X-Bar") == "false"
- def test_request_extra_query(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_query(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -437,7 +604,7 @@ def test_request_extra_query(self) -> None:
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -451,7 +618,7 @@ def test_request_extra_query(self) -> None:
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -464,10 +631,10 @@ def test_request_extra_query(self) -> None:
params = dict(request.url.params)
assert params == {"foo": "2"}
- def test_multipart_repeating_array(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_multipart_repeating_array(self, client: Gradient) -> None:
request = client._build_request(
FinalRequestOptions.construct(
- method="get",
+ method="post",
url="/foo",
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
json_data={"array": ["foo", "bar"]},
@@ -494,7 +661,71 @@ def test_multipart_repeating_array(self, client: DigitaloceanGenaiSDK) -> None:
]
@pytest.mark.respx(base_url=base_url)
- def test_basic_union_response(self, respx_mock: MockRouter) -> None:
+ def test_binary_content_upload(self, respx_mock: MockRouter, client: Gradient) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ response = client.post(
+ "/upload",
+ content=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ def test_binary_content_upload_with_iterator(self) -> None:
+ file_content = b"Hello, this is a test file."
+ counter = Counter()
+ iterator = _make_sync_iterator([file_content], counter=counter)
+
+ def mock_handler(request: httpx.Request) -> httpx.Response:
+ assert counter.value == 0, "the request body should not have been read"
+ return httpx.Response(200, content=request.read())
+
+ with Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ _strict_response_validation=True,
+ http_client=httpx.Client(transport=MockTransport(handler=mock_handler)),
+ ) as client:
+ response = client.post(
+ "/upload",
+ content=iterator,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+ assert counter.value == 1
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_binary_content_upload_with_body_is_deprecated(self, respx_mock: MockRouter, client: Gradient) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ with pytest.deprecated_call(
+ match="Passing raw bytes as `body` is deprecated and will be removed in a future version. Please pass raw bytes via the `content` parameter instead."
+ ):
+ response = client.post(
+ "/upload",
+ body=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_basic_union_response(self, respx_mock: MockRouter, client: Gradient) -> None:
class Model1(BaseModel):
name: str
@@ -503,12 +734,12 @@ class Model2(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
- def test_union_response_different_types(self, respx_mock: MockRouter) -> None:
+ def test_union_response_different_types(self, respx_mock: MockRouter, client: Gradient) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
@@ -519,18 +750,18 @@ class Model2(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1}))
- response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model1)
assert response.foo == 1
@pytest.mark.respx(base_url=base_url)
- def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None:
+ def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter, client: Gradient) -> None:
"""
Response that sets Content-Type to something other than application/json but returns json data
"""
@@ -546,13 +777,17 @@ class Model(BaseModel):
)
)
- response = self.client.get("/foo", cast_to=Model)
+ response = client.get("/foo", cast_to=Model)
assert isinstance(response, Model)
assert response.foo == 2
def test_base_url_setter(self) -> None:
- client = DigitaloceanGenaiSDK(
- base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True
+ client = Gradient(
+ base_url="https://example.com/from_init",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
)
assert client.base_url == "https://example.com/from_init/"
@@ -560,27 +795,40 @@ def test_base_url_setter(self) -> None:
assert client.base_url == "https://example.com/from_setter/"
+ client.close()
+
def test_base_url_env(self) -> None:
- with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"):
- client = DigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True)
+ with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"):
+ client = Gradient(
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
- DigitaloceanGenaiSDK(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ Gradient(
+ base_url="http://localhost:5000/custom/path/",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
),
- DigitaloceanGenaiSDK(
+ Gradient(
base_url="http://localhost:5000/custom/path/",
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
],
ids=["standard", "custom http client"],
)
- def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_base_url_trailing_slash(self, client: Gradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -589,23 +837,30 @@ def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None:
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ client.close()
@pytest.mark.parametrize(
"client",
[
- DigitaloceanGenaiSDK(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ Gradient(
+ base_url="http://localhost:5000/custom/path/",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
),
- DigitaloceanGenaiSDK(
+ Gradient(
base_url="http://localhost:5000/custom/path/",
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
],
ids=["standard", "custom http client"],
)
- def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_base_url_no_trailing_slash(self, client: Gradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -614,23 +869,30 @@ def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None:
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ client.close()
@pytest.mark.parametrize(
"client",
[
- DigitaloceanGenaiSDK(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ Gradient(
+ base_url="http://localhost:5000/custom/path/",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
),
- DigitaloceanGenaiSDK(
+ Gradient(
base_url="http://localhost:5000/custom/path/",
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
],
ids=["standard", "custom http client"],
)
- def test_absolute_request_url(self, client: DigitaloceanGenaiSDK) -> None:
+ def test_absolute_request_url(self, client: Gradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -639,44 +901,69 @@ def test_absolute_request_url(self, client: DigitaloceanGenaiSDK) -> None:
),
)
assert request.url == "https://myapi.com/foo"
+ client.close()
def test_copied_client_does_not_close_http(self) -> None:
- client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- assert not client.is_closed()
+ test_client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ _strict_response_validation=True,
+ )
+ assert not test_client.is_closed()
- copied = client.copy()
- assert copied is not client
+ copied = test_client.copy()
+ assert copied is not test_client
del copied
- assert not client.is_closed()
+ assert not test_client.is_closed()
def test_client_context_manager(self) -> None:
- client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- with client as c2:
- assert c2 is client
+ test_client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ _strict_response_validation=True,
+ )
+ with test_client as c2:
+ assert c2 is test_client
assert not c2.is_closed()
- assert not client.is_closed()
- assert client.is_closed()
+ assert not test_client.is_closed()
+ assert test_client.is_closed()
@pytest.mark.respx(base_url=base_url)
- def test_client_response_validation_error(self, respx_mock: MockRouter) -> None:
+ def test_client_response_validation_error(self, respx_mock: MockRouter, client: Gradient) -> None:
class Model(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}}))
with pytest.raises(APIResponseValidationError) as exc:
- self.client.get("/foo", cast_to=Model)
+ client.get("/foo", cast_to=Model)
assert isinstance(exc.value.__cause__, ValidationError)
def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
- DigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
+ Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ max_retries=cast(Any, None),
)
+ @pytest.mark.respx(base_url=base_url)
+ def test_default_stream_cls(self, respx_mock: MockRouter, client: Gradient) -> None:
+ class Model(BaseModel):
+ name: str
+
+ respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+
+ stream = client.post("/foo", cast_to=Model, stream=True, stream_cls=Stream[Model])
+ assert isinstance(stream, Stream)
+ stream.response.close()
+
@pytest.mark.respx(base_url=base_url)
def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None:
class Model(BaseModel):
@@ -684,16 +971,29 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ strict_client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
with pytest.raises(APIResponseValidationError):
strict_client.get("/foo", cast_to=Model)
- client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False)
+ non_strict_client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ _strict_response_validation=False,
+ )
- response = client.get("/foo", cast_to=Model)
+ response = non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
+ strict_client.close()
+ non_strict_client.close()
+
@pytest.mark.parametrize(
"remaining_retries,retry_after,timeout",
[
@@ -717,40 +1017,61 @@ class Model(BaseModel):
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
- client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
- assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
+ assert calculated == pytest.approx(timeout, rel=0.5 * 0.875) # type: ignore[misc]
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None:
+ respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- self.client.get("/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}})
-
- assert _get_open_connections(self.client) == 0
-
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ).__enter__()
+
+ assert _get_open_connections(client) == 0
+
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(return_value=httpx.Response(500))
+ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None:
+ respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- self.client.get("/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}})
-
- assert _get_open_connections(self.client) == 0
+ client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ).__enter__()
+ assert _get_open_connections(client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
def test_retries_taken(
self,
- client: DigitaloceanGenaiSDK,
+ client: Gradient,
failures_before_success: int,
failure_mode: Literal["status", "exception"],
respx_mock: MockRouter,
@@ -768,18 +1089,26 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = client.assistants.with_raw_response.list()
+ response = client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_omit_retry_count_header(
- self, client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter
+ self, client: Gradient, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = client.with_options(max_retries=4)
@@ -792,17 +1121,26 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": Omit()},
+ )
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_overwrite_retry_count_header(
- self, client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter
+ self, client: Gradient, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = client.with_options(max_retries=4)
@@ -815,65 +1153,153 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": "42"},
+ )
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
+ def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
+ # Test that the proxy environment variables are set correctly
+ monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+ # Delete in case our environment has any proxy env vars set
+ monkeypatch.delenv("HTTP_PROXY", raising=False)
+ monkeypatch.delenv("ALL_PROXY", raising=False)
+ monkeypatch.delenv("NO_PROXY", raising=False)
+ monkeypatch.delenv("http_proxy", raising=False)
+ monkeypatch.delenv("https_proxy", raising=False)
+ monkeypatch.delenv("all_proxy", raising=False)
+ monkeypatch.delenv("no_proxy", raising=False)
+
+ client = DefaultHttpxClient()
+
+ mounts = tuple(client._mounts.items())
+ assert len(mounts) == 1
+ assert mounts[0][0].pattern == "https://"
+
+ @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning")
+ def test_default_client_creation(self) -> None:
+ # Ensure that the client can be initialized without any exceptions
+ DefaultHttpxClient(
+ verify=True,
+ cert=None,
+ trust_env=True,
+ http1=True,
+ http2=False,
+ limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+ )
-class TestAsyncDigitaloceanGenaiSDK:
- client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ @pytest.mark.respx(base_url=base_url)
+ def test_follow_redirects(self, respx_mock: MockRouter, client: Gradient) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_raw_response(self, respx_mock: MockRouter) -> None:
+ def test_follow_redirects_disabled(self, respx_mock: MockRouter, client: Gradient) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ client.post(
+ "/redirect",
+ body={"key": "value"},
+ options={"follow_redirects": False},
+ cast_to=httpx.Response,
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
+
+
+class TestAsyncGradient:
+ @pytest.mark.respx(base_url=base_url)
+ async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- response = await self.client.post("/foo", cast_to=httpx.Response)
+ response = await async_client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
+ async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
respx_mock.post("/foo").mock(
- return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}')
+ return_value=httpx.Response(
+ 200,
+ headers={"Content-Type": "application/binary"},
+ content='{"foo": "bar"}',
+ )
)
- response = await self.client.post("/foo", cast_to=httpx.Response)
+ response = await async_client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
- def test_copy(self) -> None:
- copied = self.client.copy()
- assert id(copied) != id(self.client)
+ def test_copy(self, async_client: AsyncGradient) -> None:
+ copied = async_client.copy()
+ assert id(copied) != id(async_client)
+
+ copied = async_client.copy(access_token="another My Access Token")
+ assert copied.access_token == "another My Access Token"
+ assert async_client.access_token == "My Access Token"
- copied = self.client.copy(api_key="another My API Key")
- assert copied.api_key == "another My API Key"
- assert self.client.api_key == "My API Key"
+ copied = async_client.copy(model_access_key="another My Model Access Key")
+ assert copied.model_access_key == "another My Model Access Key"
+ assert async_client.model_access_key == "My Model Access Key"
+
+ copied = async_client.copy(agent_access_key="another My Agent Access Key")
+ assert copied.agent_access_key == "another My Agent Access Key"
+ assert async_client.agent_access_key == "My Agent Access Key"
def test_copy_default_options(self) -> None:
+ async_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
# options that have a default are overridden correctly
- copied = self.client.copy(max_retries=7)
+ copied = async_client.copy(max_retries=7)
assert copied.max_retries == 7
- assert self.client.max_retries == 2
+ assert async_client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
- assert isinstance(self.client.timeout, httpx.Timeout)
- copied = self.client.copy(timeout=None)
+ assert isinstance(async_client.timeout, httpx.Timeout)
+ copied = async_client.copy(timeout=None)
assert copied.timeout is None
- assert isinstance(self.client.timeout, httpx.Timeout)
+ assert isinstance(async_client.timeout, httpx.Timeout)
- def test_copy_default_headers(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ async def test_copy_default_headers(self) -> None:
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
assert client.default_headers["X-Foo"] == "bar"
@@ -904,10 +1330,16 @@ def test_copy_default_headers(self) -> None:
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
+ await client.close()
- def test_copy_default_query(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
+ async def test_copy_default_query(self) -> None:
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_query={"foo": "bar"},
)
assert _get_params(client)["foo"] == "bar"
@@ -941,13 +1373,15 @@ def test_copy_default_query(self) -> None:
):
client.copy(set_default_query={}, default_query={"foo": "Bar"})
- def test_copy_signature(self) -> None:
+ await client.close()
+
+ def test_copy_signature(self, async_client: AsyncGradient) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
- self.client.__init__, # type: ignore[misc]
+ async_client.__init__, # type: ignore[misc]
)
- copy_signature = inspect.signature(self.client.copy)
+ copy_signature = inspect.signature(async_client.copy)
exclude_params = {"transport", "proxies", "_strict_response_validation"}
for name in init_signature.parameters.keys():
@@ -957,12 +1391,22 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ @pytest.mark.skipif(
+ sys.version_info >= (3, 10),
+ reason="fails because of a memory leak that started from 3.12",
+ )
def test_copy_build_request(self) -> None:
+ async_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
options = FinalRequestOptions(method="get", url="/foo")
def build_request(options: FinalRequestOptions) -> None:
- client = self.client.copy()
- client._build_request(options)
+ client_copy = async_client.copy()
+ client_copy._build_request(options)
# ensure that the machinery is warmed up before tracing starts.
build_request(options)
@@ -997,10 +1441,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
# to_raw_response_wrapper leaks through the @functools.wraps() decorator.
#
# removing the decorator fixes the leak for reasons we don't understand.
- "digitalocean_genai_sdk/_legacy_response.py",
- "digitalocean_genai_sdk/_response.py",
+ "gradient/_legacy_response.py",
+ "gradient/_response.py",
# pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
- "digitalocean_genai_sdk/_compat.py",
+ "gradient/_compat.py",
# Standard library leaks we don't care about.
"/logging/__init__.py",
]
@@ -1014,106 +1458,178 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
add_leak(leaks, diff)
if leaks:
for leak in leaks:
- print("MEMORY LEAK:", leak)
+ print("MEMORY LEAK:", leak) # noqa: T201
for frame in leak.traceback:
- print(frame)
+ print(frame) # noqa: T201
raise AssertionError()
- async def test_request_timeout(self) -> None:
- request = self.client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ async def test_request_timeout(self, async_client: AsyncGradient) -> None:
+ request = async_client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
- request = self.client._build_request(
+ request = async_client._build_request(
FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))
)
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(100.0)
async def test_client_timeout_option(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0)
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ timeout=httpx.Timeout(0),
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(0)
+ await client.close()
+
async def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
async with httpx.AsyncClient(timeout=None) as http_client:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(None)
+ await client.close()
+
# no timeout given to the httpx client should not use the httpx default
async with httpx.AsyncClient() as http_client:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
+ await client.close()
+
# explicitly passing the default timeout currently results in it being ignored
async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT # our default
+ await client.close()
+
def test_invalid_http_client(self) -> None:
with pytest.raises(TypeError, match="Invalid `http_client` arg"):
with httpx.Client() as http_client:
- AsyncDigitaloceanGenaiSDK(
+ AsyncGradient(
base_url=base_url,
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
- def test_default_headers_option(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ async def test_default_headers_option(self) -> None:
+ test_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
- client2 = AsyncDigitaloceanGenaiSDK(
+ test_client2 = AsyncGradient(
base_url=base_url,
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
"X-Stainless-Lang": "my-overriding-header",
},
)
- request = client2._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
+ await test_client.close()
+ await test_client2.close()
+
def test_validate_headers(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
- assert request.headers.get("Authorization") == f"Bearer {api_key}"
+ assert request.headers.get("Authorization") == f"Bearer {access_token}"
+
+ with update_env(
+ **{
+ "DIGITALOCEAN_ACCESS_TOKEN": Omit(),
+ "MODEL_ACCESS_KEY": Omit(),
+ "AGENT_ACCESS_KEY": Omit(),
+ }
+ ):
+ client2 = AsyncGradient(
+ base_url=base_url,
+ access_token=None,
+ model_access_key=None,
+ agent_access_key=None,
+ _strict_response_validation=True,
+ )
- with pytest.raises(DigitaloceanGenaiSDKError):
- with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}):
- client2 = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
+ with pytest.raises(
+ TypeError,
+ match="Could not resolve authentication method. Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted",
+ ):
+ client2._build_request(FinalRequestOptions(method="get", url="/foo"))
- def test_default_query_option(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
+ request2 = client2._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()})
+ )
+ assert request2.headers.get("Authorization") is None
+
+ async def test_default_query_option(self) -> None:
+ client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ default_query={"query_param": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
@@ -1129,8 +1645,10 @@ def test_default_query_option(self) -> None:
url = httpx.URL(request.url)
assert dict(url.params) == {"foo": "baz", "query_param": "overridden"}
- def test_request_extra_json(self) -> None:
- request = self.client._build_request(
+ await client.close()
+
+ def test_request_extra_json(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1141,7 +1659,7 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": False}
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1152,7 +1670,7 @@ def test_request_extra_json(self) -> None:
assert data == {"baz": False}
# `extra_json` takes priority over `json_data` when keys clash
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1163,8 +1681,8 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
- def test_request_extra_headers(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_headers(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1174,7 +1692,7 @@ def test_request_extra_headers(self) -> None:
assert request.headers.get("X-Foo") == "Foo"
# `extra_headers` takes priority over `default_headers` when keys clash
- request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request(
+ request = client.with_options(default_headers={"X-Bar": "true"})._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1185,8 +1703,8 @@ def test_request_extra_headers(self) -> None:
)
assert request.headers.get("X-Bar") == "false"
- def test_request_extra_query(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_query(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1199,7 +1717,7 @@ def test_request_extra_query(self) -> None:
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1213,7 +1731,7 @@ def test_request_extra_query(self) -> None:
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1226,10 +1744,10 @@ def test_request_extra_query(self) -> None:
params = dict(request.url.params)
assert params == {"foo": "2"}
- def test_multipart_repeating_array(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+ def test_multipart_repeating_array(self, async_client: AsyncGradient) -> None:
request = async_client._build_request(
FinalRequestOptions.construct(
- method="get",
+ method="post",
url="/foo",
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
json_data={"array": ["foo", "bar"]},
@@ -1256,7 +1774,73 @@ def test_multipart_repeating_array(self, async_client: AsyncDigitaloceanGenaiSDK
]
@pytest.mark.respx(base_url=base_url)
- async def test_basic_union_response(self, respx_mock: MockRouter) -> None:
+ async def test_binary_content_upload(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ response = await async_client.post(
+ "/upload",
+ content=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ async def test_binary_content_upload_with_asynciterator(self) -> None:
+ file_content = b"Hello, this is a test file."
+ counter = Counter()
+ iterator = _make_async_iterator([file_content], counter=counter)
+
+ async def mock_handler(request: httpx.Request) -> httpx.Response:
+ assert counter.value == 0, "the request body should not have been read"
+ return httpx.Response(200, content=await request.aread())
+
+ async with AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ _strict_response_validation=True,
+ http_client=httpx.AsyncClient(transport=MockTransport(handler=mock_handler)),
+ ) as client:
+ response = await client.post(
+ "/upload",
+ content=iterator,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+ assert counter.value == 1
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_binary_content_upload_with_body_is_deprecated(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ with pytest.deprecated_call(
+ match="Passing raw bytes as `body` is deprecated and will be removed in a future version. Please pass raw bytes via the `content` parameter instead."
+ ):
+ response = await async_client.post(
+ "/upload",
+ body=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
class Model1(BaseModel):
name: str
@@ -1265,12 +1849,12 @@ class Model2(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
- async def test_union_response_different_types(self, respx_mock: MockRouter) -> None:
+ async def test_union_response_different_types(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
@@ -1281,18 +1865,20 @@ class Model2(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
- response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1}))
- response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model1)
assert response.foo == 1
@pytest.mark.respx(base_url=base_url)
- async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None:
+ async def test_non_application_json_content_type_for_json_data(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
"""
Response that sets Content-Type to something other than application/json but returns json data
"""
@@ -1308,13 +1894,17 @@ class Model(BaseModel):
)
)
- response = await self.client.get("/foo", cast_to=Model)
+ response = await async_client.get("/foo", cast_to=Model)
assert isinstance(response, Model)
assert response.foo == 2
- def test_base_url_setter(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(
- base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True
+ async def test_base_url_setter(self) -> None:
+ client = AsyncGradient(
+ base_url="https://example.com/from_init",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
)
assert client.base_url == "https://example.com/from_init/"
@@ -1322,27 +1912,40 @@ def test_base_url_setter(self) -> None:
assert client.base_url == "https://example.com/from_setter/"
- def test_base_url_env(self) -> None:
- with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"):
- client = AsyncDigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True)
+ await client.close()
+
+ async def test_base_url_env(self) -> None:
+ with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"):
+ client = AsyncGradient(
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
- AsyncDigitaloceanGenaiSDK(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ AsyncGradient(
+ base_url="http://localhost:5000/custom/path/",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
),
- AsyncDigitaloceanGenaiSDK(
+ AsyncGradient(
base_url="http://localhost:5000/custom/path/",
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
],
ids=["standard", "custom http client"],
)
- def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_base_url_trailing_slash(self, client: AsyncGradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1351,23 +1954,30 @@ def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> Non
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ await client.close()
@pytest.mark.parametrize(
"client",
[
- AsyncDigitaloceanGenaiSDK(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ AsyncGradient(
+ base_url="http://localhost:5000/custom/path/",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
),
- AsyncDigitaloceanGenaiSDK(
+ AsyncGradient(
base_url="http://localhost:5000/custom/path/",
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
],
ids=["standard", "custom http client"],
)
- def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1376,23 +1986,30 @@ def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) ->
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ await client.close()
@pytest.mark.parametrize(
"client",
[
- AsyncDigitaloceanGenaiSDK(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ AsyncGradient(
+ base_url="http://localhost:5000/custom/path/",
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
),
- AsyncDigitaloceanGenaiSDK(
+ AsyncGradient(
base_url="http://localhost:5000/custom/path/",
- api_key=api_key,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
],
ids=["standard", "custom http client"],
)
- def test_absolute_request_url(self, client: AsyncDigitaloceanGenaiSDK) -> None:
+ async def test_absolute_request_url(self, client: AsyncGradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1401,64 +2018,106 @@ def test_absolute_request_url(self, client: AsyncDigitaloceanGenaiSDK) -> None:
),
)
assert request.url == "https://myapi.com/foo"
+ await client.close()
async def test_copied_client_does_not_close_http(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- assert not client.is_closed()
+ test_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
+ assert not test_client.is_closed()
- copied = client.copy()
- assert copied is not client
+ copied = test_client.copy()
+ assert copied is not test_client
del copied
await asyncio.sleep(0.2)
- assert not client.is_closed()
+ assert not test_client.is_closed()
async def test_client_context_manager(self) -> None:
- client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- async with client as c2:
- assert c2 is client
+ test_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
+ async with test_client as c2:
+ assert c2 is test_client
assert not c2.is_closed()
- assert not client.is_closed()
- assert client.is_closed()
+ assert not test_client.is_closed()
+ assert test_client.is_closed()
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_client_response_validation_error(self, respx_mock: MockRouter) -> None:
+ async def test_client_response_validation_error(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
class Model(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}}))
with pytest.raises(APIResponseValidationError) as exc:
- await self.client.get("/foo", cast_to=Model)
+ await async_client.get("/foo", cast_to=Model)
assert isinstance(exc.value.__cause__, ValidationError)
async def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
- AsyncDigitaloceanGenaiSDK(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
+ AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ max_retries=cast(Any, None),
)
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
+ async def test_default_stream_cls(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
+ class Model(BaseModel):
+ name: str
+
+ respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+
+ stream = await async_client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model])
+ assert isinstance(stream, AsyncStream)
+ await stream.response.aclose()
+
+ @pytest.mark.respx(base_url=base_url)
async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None:
class Model(BaseModel):
name: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ strict_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
with pytest.raises(APIResponseValidationError):
await strict_client.get("/foo", cast_to=Model)
- client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False)
+ non_strict_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=False,
+ )
- response = await client.get("/foo", cast_to=Model)
+ response = await non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
+ await strict_client.close()
+ await non_strict_client.close()
+
@pytest.mark.parametrize(
"remaining_retries,retry_after,timeout",
[
@@ -1483,45 +2142,65 @@ class Model(BaseModel):
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
@pytest.mark.asyncio
async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
- client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ async_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ _strict_response_validation=True,
+ )
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
- calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
- assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
+ calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers)
+ assert calculated == pytest.approx(timeout, rel=0.5 * 0.875) # type: ignore[misc]
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ async def test_retrying_timeout_errors_doesnt_leak(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
+ respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- await self.client.get(
- "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}
- )
-
- assert _get_open_connections(self.client) == 0
-
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ await async_client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ).__aenter__()
+
+ assert _get_open_connections(async_client) == 0
+
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.get("/assistants").mock(return_value=httpx.Response(500))
+ async def test_retrying_status_errors_doesnt_leak(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
+ respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- await self.client.get(
- "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}
- )
-
- assert _get_open_connections(self.client) == 0
+ await async_client.chat.completions.with_streaming_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ ).__aenter__()
+ assert _get_open_connections(async_client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
async def test_retries_taken(
self,
- async_client: AsyncDigitaloceanGenaiSDK,
+ async_client: AsyncGradient,
failures_before_success: int,
failure_mode: Literal["status", "exception"],
respx_mock: MockRouter,
@@ -1539,19 +2218,29 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = await client.assistants.with_raw_response.list()
+ response = await client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ )
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
async def test_omit_retry_count_header(
- self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter
+ self,
+ async_client: AsyncGradient,
+ failures_before_success: int,
+ respx_mock: MockRouter,
) -> None:
client = async_client.with_options(max_retries=4)
@@ -1564,18 +2253,29 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = await client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": Omit()},
+ )
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
async def test_overwrite_retry_count_header(
- self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter
+ self,
+ async_client: AsyncGradient,
+ failures_before_success: int,
+ respx_mock: MockRouter,
) -> None:
client = async_client.with_options(max_retries=4)
@@ -1588,53 +2288,81 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.get("/assistants").mock(side_effect=retry_handler)
-
- response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
+ respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
+
+ response = await client.chat.completions.with_raw_response.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ extra_headers={"x-stainless-retry-count": "42"},
+ )
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
- def test_get_platform(self) -> None:
- # A previous implementation of asyncify could leave threads unterminated when
- # used with nest_asyncio.
- #
- # Since nest_asyncio.apply() is global and cannot be un-applied, this
- # test is run in a separate process to avoid affecting other tests.
- test_code = dedent("""
- import asyncio
- import nest_asyncio
- import threading
-
- from digitalocean_genai_sdk._utils import asyncify
- from digitalocean_genai_sdk._base_client import get_platform
-
- async def test_main() -> None:
- result = await asyncify(get_platform)()
- print(result)
- for thread in threading.enumerate():
- print(thread.name)
-
- nest_asyncio.apply()
- asyncio.run(test_main())
- """)
- with subprocess.Popen(
- [sys.executable, "-c", test_code],
- text=True,
- ) as process:
- timeout = 10 # seconds
-
- start_time = time.monotonic()
- while True:
- return_code = process.poll()
- if return_code is not None:
- if return_code != 0:
- raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")
-
- # success
- break
-
- if time.monotonic() - start_time > timeout:
- process.kill()
- raise AssertionError("calling get_platform using asyncify resulted in a hung process")
-
- time.sleep(0.1)
+ async def test_get_platform(self) -> None:
+ platform = await asyncify(get_platform)()
+ assert isinstance(platform, (str, OtherPlatform))
+
+ async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
+ # Test that the proxy environment variables are set correctly
+ monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+ # Delete in case our environment has any proxy env vars set
+ monkeypatch.delenv("HTTP_PROXY", raising=False)
+ monkeypatch.delenv("ALL_PROXY", raising=False)
+ monkeypatch.delenv("NO_PROXY", raising=False)
+ monkeypatch.delenv("http_proxy", raising=False)
+ monkeypatch.delenv("https_proxy", raising=False)
+ monkeypatch.delenv("all_proxy", raising=False)
+ monkeypatch.delenv("no_proxy", raising=False)
+
+ client = DefaultAsyncHttpxClient()
+
+ mounts = tuple(client._mounts.items())
+ assert len(mounts) == 1
+ assert mounts[0][0].pattern == "https://"
+
+ @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning")
+ async def test_default_client_creation(self) -> None:
+ # Ensure that the client can be initialized without any exceptions
+ DefaultAsyncHttpxClient(
+ verify=True,
+ cert=None,
+ trust_env=True,
+ http1=True,
+ http2=False,
+ limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+ )
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = await async_client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects_disabled(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ await async_client.post(
+ "/redirect",
+ body={"key": "value"},
+ options={"follow_redirects": False},
+ cast_to=httpx.Response,
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
index 317130ef..b5520a27 100644
--- a/tests/test_deepcopy.py
+++ b/tests/test_deepcopy.py
@@ -1,4 +1,4 @@
-from digitalocean_genai_sdk._utils import deepcopy_minimal
+from gradient._utils import deepcopy_minimal
def assert_different_identities(obj1: object, obj2: object) -> None:
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index aad87e09..9514d242 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -4,8 +4,8 @@
import pytest
-from digitalocean_genai_sdk._types import FileTypes
-from digitalocean_genai_sdk._utils import extract_files
+from gradient._types import FileTypes
+from gradient._utils import extract_files
def test_removes_files_from_input() -> None:
diff --git a/tests/test_files.py b/tests/test_files.py
index f3a07ce0..54210e83 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,41 +4,41 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from digitalocean_genai_sdk._files import to_httpx_files, async_to_httpx_files
+from gradient._files import to_httpx_files, async_to_httpx_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
def test_pathlib_includes_file_name() -> None:
result = to_httpx_files({"file": readme_path})
- print(result)
+ print(result) # noqa: T201
assert result == IsDict({"file": IsTuple("README.md", IsBytes())})
def test_tuple_input() -> None:
result = to_httpx_files([("file", readme_path)])
- print(result)
+ print(result) # noqa: T201
assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes())))
@pytest.mark.asyncio
async def test_async_pathlib_includes_file_name() -> None:
result = await async_to_httpx_files({"file": readme_path})
- print(result)
+ print(result) # noqa: T201
assert result == IsDict({"file": IsTuple("README.md", IsBytes())})
@pytest.mark.asyncio
async def test_async_supports_anyio_path() -> None:
result = await async_to_httpx_files({"file": anyio.Path(readme_path)})
- print(result)
+ print(result) # noqa: T201
assert result == IsDict({"file": IsTuple("README.md", IsBytes())})
@pytest.mark.asyncio
async def test_async_tuple_input() -> None:
result = await async_to_httpx_files([("file", readme_path)])
- print(result)
+ print(result) # noqa: T201
assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes())))
diff --git a/tests/test_models.py b/tests/test_models.py
index 0be34866..ba635571 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,5 +1,5 @@
import json
-from typing import Any, Dict, List, Union, Optional, cast
+from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast
from datetime import datetime, timezone
from typing_extensions import Literal, Annotated, TypeAliasType
@@ -7,9 +7,9 @@
import pydantic
from pydantic import Field
-from digitalocean_genai_sdk._utils import PropertyInfo
-from digitalocean_genai_sdk._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
-from digitalocean_genai_sdk._models import BaseModel, construct_type
+from gradient._utils import PropertyInfo
+from gradient._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
+from gradient._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
class BasicModel(BaseModel):
@@ -294,12 +294,12 @@ class Model(BaseModel):
assert cast(bool, m.foo) is True
m = Model.construct(foo={"name": 3})
- if PYDANTIC_V2:
- assert isinstance(m.foo, Submodel1)
- assert m.foo.name == 3 # type: ignore
- else:
+ if PYDANTIC_V1:
assert isinstance(m.foo, Submodel2)
assert m.foo.name == "3"
+ else:
+ assert isinstance(m.foo, Submodel1)
+ assert m.foo.name == 3 # type: ignore
def test_list_of_unions() -> None:
@@ -426,10 +426,10 @@ class Model(BaseModel):
expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc)
- if PYDANTIC_V2:
- expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}'
- else:
+ if PYDANTIC_V1:
expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}'
+ else:
+ expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}'
model = Model.construct(created_at="2019-12-27T18:11:19.117Z")
assert model.created_at == expected
@@ -531,7 +531,7 @@ class Model2(BaseModel):
assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)}
assert m4.to_dict(mode="json") == {"created_at": time_str}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"):
m.to_dict(warnings=False)
@@ -556,7 +556,7 @@ class Model(BaseModel):
assert m3.model_dump() == {"foo": None}
assert m3.model_dump(exclude_none=True) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"):
m.model_dump(round_trip=True)
@@ -580,10 +580,10 @@ class Model(BaseModel):
assert json.loads(m.to_json()) == {"FOO": "hello"}
assert json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"}
- if PYDANTIC_V2:
- assert m.to_json(indent=None) == '{"FOO":"hello"}'
- else:
+ if PYDANTIC_V1:
assert m.to_json(indent=None) == '{"FOO": "hello"}'
+ else:
+ assert m.to_json(indent=None) == '{"FOO":"hello"}'
m2 = Model()
assert json.loads(m2.to_json()) == {}
@@ -595,7 +595,7 @@ class Model(BaseModel):
assert json.loads(m3.to_json()) == {"FOO": None}
assert json.loads(m3.to_json(exclude_none=True)) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"):
m.to_json(warnings=False)
@@ -622,7 +622,7 @@ class Model(BaseModel):
assert json.loads(m3.model_dump_json()) == {"foo": None}
assert json.loads(m3.model_dump_json(exclude_none=True)) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"):
m.model_dump_json(round_trip=True)
@@ -679,12 +679,12 @@ class B(BaseModel):
)
assert isinstance(m, A)
assert m.type == "a"
- if PYDANTIC_V2:
- assert m.data == 100 # type: ignore[comparison-overlap]
- else:
+ if PYDANTIC_V1:
# pydantic v1 automatically converts inputs to strings
# if the expected type is a str
assert m.data == "100"
+ else:
+ assert m.data == 100 # type: ignore[comparison-overlap]
def test_discriminated_unions_unknown_variant() -> None:
@@ -768,12 +768,12 @@ class B(BaseModel):
)
assert isinstance(m, A)
assert m.foo_type == "a"
- if PYDANTIC_V2:
- assert m.data == 100 # type: ignore[comparison-overlap]
- else:
+ if PYDANTIC_V1:
# pydantic v1 automatically converts inputs to strings
# if the expected type is a str
assert m.data == "100"
+ else:
+ assert m.data == 100 # type: ignore[comparison-overlap]
def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None:
@@ -809,7 +809,7 @@ class B(BaseModel):
UnionType = cast(Any, Union[A, B])
- assert not hasattr(UnionType, "__discriminator__")
+ assert not DISCRIMINATOR_CACHE.get(UnionType)
m = construct_type(
value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")])
@@ -818,7 +818,7 @@ class B(BaseModel):
assert m.type == "b"
assert m.data == "foo" # type: ignore[comparison-overlap]
- discriminator = UnionType.__discriminator__
+ discriminator = DISCRIMINATOR_CACHE.get(UnionType)
assert discriminator is not None
m = construct_type(
@@ -830,10 +830,10 @@ class B(BaseModel):
# if the discriminator details object stays the same between invocations then
# we hit the cache
- assert UnionType.__discriminator__ is discriminator
+ assert DISCRIMINATOR_CACHE.get(UnionType) is discriminator
-@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1")
+@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1")
def test_type_alias_type() -> None:
Alias = TypeAliasType("Alias", str) # pyright: ignore
@@ -849,7 +849,7 @@ class Model(BaseModel):
assert m.union == "bar"
-@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1")
+@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1")
def test_field_named_cls() -> None:
class Model(BaseModel):
cls: str
@@ -889,3 +889,75 @@ class ModelB(BaseModel):
)
assert isinstance(m, ModelB)
+
+
+def test_nested_discriminated_union() -> None:
+ class InnerType1(BaseModel):
+ type: Literal["type_1"]
+
+ class InnerModel(BaseModel):
+ inner_value: str
+
+ class InnerType2(BaseModel):
+ type: Literal["type_2"]
+ some_inner_model: InnerModel
+
+ class Type1(BaseModel):
+ base_type: Literal["base_type_1"]
+ value: Annotated[
+ Union[
+ InnerType1,
+ InnerType2,
+ ],
+ PropertyInfo(discriminator="type"),
+ ]
+
+ class Type2(BaseModel):
+ base_type: Literal["base_type_2"]
+
+ T = Annotated[
+ Union[
+ Type1,
+ Type2,
+ ],
+ PropertyInfo(discriminator="base_type"),
+ ]
+
+ model = construct_type(
+ type_=T,
+ value={
+ "base_type": "base_type_1",
+ "value": {
+ "type": "type_2",
+ },
+ },
+ )
+ assert isinstance(model, Type1)
+ assert isinstance(model.value, InnerType2)
+
+
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2 for now")
+def test_extra_properties() -> None:
+ class Item(BaseModel):
+ prop: int
+
+ class Model(BaseModel):
+ __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride]
+
+ other: str
+
+ if TYPE_CHECKING:
+
+ def __getattr__(self, attr: str) -> Item: ...
+
+ model = construct_type(
+ type_=Model,
+ value={
+ "a": {"prop": 1},
+ "other": "foo",
+ },
+ )
+ assert isinstance(model, Model)
+ assert model.a.prop == 1
+ assert isinstance(model.a, Item)
+ assert model.other == "foo"
diff --git a/tests/test_qs.py b/tests/test_qs.py
index 41824698..32fb2091 100644
--- a/tests/test_qs.py
+++ b/tests/test_qs.py
@@ -4,7 +4,7 @@
import pytest
-from digitalocean_genai_sdk._qs import Querystring, stringify
+from gradient._qs import Querystring, stringify
def test_empty() -> None:
diff --git a/tests/test_required_args.py b/tests/test_required_args.py
index 379ac794..3956dc02 100644
--- a/tests/test_required_args.py
+++ b/tests/test_required_args.py
@@ -2,7 +2,7 @@
import pytest
-from digitalocean_genai_sdk._utils import required_args
+from gradient._utils import required_args
def test_too_many_positional_params() -> None:
diff --git a/tests/test_response.py b/tests/test_response.py
index 768537aa..6dd53185 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -6,8 +6,8 @@
import pytest
import pydantic
-from digitalocean_genai_sdk import BaseModel, DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk._response import (
+from gradient import Gradient, BaseModel, AsyncGradient
+from gradient._response import (
APIResponse,
BaseAPIResponse,
AsyncAPIResponse,
@@ -15,8 +15,8 @@
AsyncBinaryAPIResponse,
extract_response_type,
)
-from digitalocean_genai_sdk._streaming import Stream
-from digitalocean_genai_sdk._base_client import FinalRequestOptions
+from gradient._streaming import Stream
+from gradient._base_client import FinalRequestOptions
class ConcreteBaseAPIResponse(APIResponse[bytes]): ...
@@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None:
def test_extract_response_type_direct_class_missing_type_arg() -> None:
with pytest.raises(
RuntimeError,
- match="Expected type to have a type argument at index 0 but it did not",
+ match="Expected type to have a type argument at index 0 but it did not",
):
extract_response_type(AsyncAPIResponse)
@@ -56,7 +56,7 @@ def test_extract_response_type_binary_response() -> None:
class PydanticModel(pydantic.BaseModel): ...
-def test_response_parse_mismatched_basemodel(client: DigitaloceanGenaiSDK) -> None:
+def test_response_parse_mismatched_basemodel(client: Gradient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
@@ -68,13 +68,13 @@ def test_response_parse_mismatched_basemodel(client: DigitaloceanGenaiSDK) -> No
with pytest.raises(
TypeError,
- match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`",
+ match="Pydantic models must subclass our base model type, e.g. `from gradient import BaseModel`",
):
response.parse(to=PydanticModel)
@pytest.mark.asyncio
-async def test_async_response_parse_mismatched_basemodel(async_client: AsyncDigitaloceanGenaiSDK) -> None:
+async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGradient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
@@ -86,12 +86,12 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncDigi
with pytest.raises(
TypeError,
- match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`",
+ match="Pydantic models must subclass our base model type, e.g. `from gradient import BaseModel`",
):
await response.parse(to=PydanticModel)
-def test_response_parse_custom_stream(client: DigitaloceanGenaiSDK) -> None:
+def test_response_parse_custom_stream(client: Gradient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
@@ -106,7 +106,7 @@ def test_response_parse_custom_stream(client: DigitaloceanGenaiSDK) -> None:
@pytest.mark.asyncio
-async def test_async_response_parse_custom_stream(async_client: AsyncDigitaloceanGenaiSDK) -> None:
+async def test_async_response_parse_custom_stream(async_client: AsyncGradient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
@@ -125,7 +125,7 @@ class CustomModel(BaseModel):
bar: int
-def test_response_parse_custom_model(client: DigitaloceanGenaiSDK) -> None:
+def test_response_parse_custom_model(client: Gradient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
@@ -141,7 +141,7 @@ def test_response_parse_custom_model(client: DigitaloceanGenaiSDK) -> None:
@pytest.mark.asyncio
-async def test_async_response_parse_custom_model(async_client: AsyncDigitaloceanGenaiSDK) -> None:
+async def test_async_response_parse_custom_model(async_client: AsyncGradient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=async_client,
@@ -156,7 +156,7 @@ async def test_async_response_parse_custom_model(async_client: AsyncDigitalocean
assert obj.bar == 2
-def test_response_parse_annotated_type(client: DigitaloceanGenaiSDK) -> None:
+def test_response_parse_annotated_type(client: Gradient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
@@ -173,7 +173,7 @@ def test_response_parse_annotated_type(client: DigitaloceanGenaiSDK) -> None:
assert obj.bar == 2
-async def test_async_response_parse_annotated_type(async_client: AsyncDigitaloceanGenaiSDK) -> None:
+async def test_async_response_parse_annotated_type(async_client: AsyncGradient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=async_client,
@@ -201,7 +201,7 @@ async def test_async_response_parse_annotated_type(async_client: AsyncDigitaloce
("FalSe", False),
],
)
-def test_response_parse_bool(client: DigitaloceanGenaiSDK, content: str, expected: bool) -> None:
+def test_response_parse_bool(client: Gradient, content: str, expected: bool) -> None:
response = APIResponse(
raw=httpx.Response(200, content=content),
client=client,
@@ -226,7 +226,7 @@ def test_response_parse_bool(client: DigitaloceanGenaiSDK, content: str, expecte
("FalSe", False),
],
)
-async def test_async_response_parse_bool(client: AsyncDigitaloceanGenaiSDK, content: str, expected: bool) -> None:
+async def test_async_response_parse_bool(client: AsyncGradient, content: str, expected: bool) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=content),
client=client,
@@ -245,7 +245,7 @@ class OtherModel(BaseModel):
@pytest.mark.parametrize("client", [False], indirect=True) # loose validation
-def test_response_parse_expect_model_union_non_json_content(client: DigitaloceanGenaiSDK) -> None:
+def test_response_parse_expect_model_union_non_json_content(client: Gradient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=client,
@@ -262,9 +262,7 @@ def test_response_parse_expect_model_union_non_json_content(client: Digitalocean
@pytest.mark.asyncio
@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation
-async def test_async_response_parse_expect_model_union_non_json_content(
- async_client: AsyncDigitaloceanGenaiSDK,
-) -> None:
+async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncGradient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=async_client,
diff --git a/tests/test_smoke_sdk.py b/tests/test_smoke_sdk.py
new file mode 100644
index 00000000..41358ce2
--- /dev/null
+++ b/tests/test_smoke_sdk.py
@@ -0,0 +1,84 @@
+from __future__ import annotations
+
+import os
+
+import pytest
+
+from gradient import Gradient
+
+REQUIRED_ENV_VARS = (
+ "DIGITALOCEAN_ACCESS_TOKEN",
+ "GRADIENT_MODEL_ACCESS_KEY",
+ "GRADIENT_AGENT_ACCESS_KEY",
+ "GRADIENT_AGENT_ENDPOINT",
+)
+
+
+@pytest.mark.smoke
+def test_smoke_environment_and_client_state() -> None:
+ """Validate required env vars, client auto-loaded properties, and perform a minimal API call.
+
+ This central test ensures environment configuration & client state are correct so other sync
+ smoke tests can focus purely on API behavior without repeating these assertions.
+ """
+ missing = [k for k in REQUIRED_ENV_VARS if not os.getenv(k)]
+ if missing:
+ pytest.fail(
+ "Missing required environment variables for smoke tests: " + ", ".join(missing),
+ pytrace=False,
+ )
+
+ client = Gradient()
+
+ # Property assertions (auto-loaded from environment)
+ assert client.access_token == os.environ["DIGITALOCEAN_ACCESS_TOKEN"], "access_token not loaded from env"
+ assert client.model_access_key == os.environ["GRADIENT_MODEL_ACCESS_KEY"], "model_access_key not loaded from env"
+ assert client.agent_access_key == os.environ["GRADIENT_AGENT_ACCESS_KEY"], "agent_access_key not loaded from env"
+ expected_endpoint = os.environ["GRADIENT_AGENT_ENDPOINT"]
+ normalized_expected = (
+ expected_endpoint if expected_endpoint.startswith("https://") else f"https://{expected_endpoint}"
+ )
+ assert client.agent_endpoint == normalized_expected, "agent_endpoint not derived correctly from env"
+
+
+@pytest.mark.smoke
+def test_smoke_agents_listing() -> None:
+ client = Gradient()
+ # Minimal API surface check (agents list)
+ agents_list = client.agents.list()
+ assert agents_list is not None
+ assert hasattr(agents_list, "agents")
+
+
+@pytest.mark.smoke
+def test_smoke_gpu_droplets_listing() -> None:
+ client = Gradient()
+ droplets_list = client.gpu_droplets.list(type="gpus")
+ assert droplets_list is not None
+ assert hasattr(droplets_list, "droplets")
+
+
+@pytest.mark.smoke
+def test_smoke_inference_completion() -> None:
+ inference_client = Gradient()
+ completion = inference_client.chat.completions.create(
+ model="llama3-8b-instruct",
+ messages=[{"role": "user", "content": "ping"}],
+ )
+ # Basic structural checks
+ assert completion is not None
+ assert completion.choices, "Expected at least one choice in completion response"
+ assert completion.choices[0].message.content is not None
+
+
+@pytest.mark.smoke
+def test_smoke_agent_inference_chat() -> None:
+ agent_client = Gradient()
+
+    # The agent endpoint may resolve the model implicitly; adapt this if the backend requires an explicit model
+ completion = agent_client.agents.chat.completions.create(
+ model="", # Intentionally blank per original example; adjust if backend requires non-empty
+ messages=[{"role": "user", "content": "ping"}],
+ )
+ assert completion is not None
+ assert completion.choices
diff --git a/tests/test_smoke_sdk_async.py b/tests/test_smoke_sdk_async.py
new file mode 100644
index 00000000..8425fa77
--- /dev/null
+++ b/tests/test_smoke_sdk_async.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import os
+
+import pytest
+
+from gradient import AsyncGradient
+
+REQUIRED_ENV_VARS = (
+ "DIGITALOCEAN_ACCESS_TOKEN",
+ "GRADIENT_MODEL_ACCESS_KEY",
+ "GRADIENT_AGENT_ACCESS_KEY",
+ "GRADIENT_AGENT_ENDPOINT",
+)
+
+
+@pytest.mark.smoke
+@pytest.mark.asyncio
+async def test_async_smoke_environment_and_client_state() -> None:
+ """Validate required env vars, client auto-loaded properties, and perform a minimal API call.
+
+ This central test ensures environment configuration & client state are correct so other async
+ smoke tests can focus purely on API behavior without repeating these assertions.
+ """
+ missing = [k for k in REQUIRED_ENV_VARS if not os.getenv(k)]
+ if missing:
+ pytest.fail(
+ "Missing required environment variables for async smoke tests: " + ", ".join(missing),
+ pytrace=False,
+ )
+
+ async with AsyncGradient() as client:
+ # Property assertions (auto-loaded from environment)
+ assert client.access_token == os.environ["DIGITALOCEAN_ACCESS_TOKEN"], "access_token not loaded from env"
+ assert client.model_access_key == os.environ["GRADIENT_MODEL_ACCESS_KEY"], (
+ "model_access_key not loaded from env"
+ )
+ assert client.agent_access_key == os.environ["GRADIENT_AGENT_ACCESS_KEY"], (
+ "agent_access_key not loaded from env"
+ )
+ expected_endpoint = os.environ["GRADIENT_AGENT_ENDPOINT"]
+ normalized_expected = (
+ expected_endpoint if expected_endpoint.startswith("https://") else f"https://{expected_endpoint}"
+ )
+ assert client.agent_endpoint == normalized_expected, "agent_endpoint not derived correctly from env"
+
+
+@pytest.mark.smoke
+@pytest.mark.asyncio
+async def test_async_smoke_agents_listing() -> None:
+ async with AsyncGradient() as client:
+ agents_list = await client.agents.list()
+ assert agents_list is not None
+ assert hasattr(agents_list, "agents")
+
+
+@pytest.mark.smoke
+@pytest.mark.asyncio
+async def test_async_smoke_gpu_droplets_listing() -> None:
+ async with AsyncGradient() as client:
+ droplets_list = await client.gpu_droplets.list(type="gpus")
+ assert droplets_list is not None
+ assert hasattr(droplets_list, "droplets")
+
+
+@pytest.mark.smoke
+@pytest.mark.asyncio
+async def test_async_smoke_inference_completion() -> None:
+ async with AsyncGradient() as inference_client:
+ completion = await inference_client.chat.completions.create(
+ model="llama3-8b-instruct",
+ messages=[{"role": "user", "content": "ping"}],
+ )
+ assert completion is not None
+ assert completion.choices
+ assert completion.choices[0].message.content is not None
+
+
+@pytest.mark.smoke
+@pytest.mark.asyncio
+async def test_async_smoke_agent_inference_chat() -> None:
+ async with AsyncGradient() as agent_client:
+ completion = await agent_client.agents.chat.completions.create(
+ model="",
+ messages=[{"role": "user", "content": "ping"}],
+ )
+ assert completion is not None
+ assert completion.choices
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index e707c674..c4a8e46f 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -5,13 +5,13 @@
import httpx
import pytest
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk._streaming import Stream, AsyncStream, ServerSentEvent
+from gradient import Gradient, AsyncGradient
+from gradient._streaming import Stream, AsyncStream, ServerSentEvent
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_basic(sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+async def test_basic(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b"event: completion\n"
yield b'data: {"foo":true}\n'
@@ -28,9 +28,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_data_missing_event(
- sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK
-) -> None:
+async def test_data_missing_event(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b'data: {"foo":true}\n'
yield b"\n"
@@ -46,9 +44,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_event_missing_data(
- sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK
-) -> None:
+async def test_event_missing_data(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"\n"
@@ -64,9 +60,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_events(
- sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK
-) -> None:
+async def test_multiple_events(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"\n"
@@ -88,9 +82,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_events_with_data(
- sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK
-) -> None:
+async def test_multiple_events_with_data(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b'data: {"foo":true}\n'
@@ -114,9 +106,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_data_lines_with_empty_line(
- sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK
-) -> None:
+async def test_multiple_data_lines_with_empty_line(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"data: {\n"
@@ -138,9 +128,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_data_json_escaped_double_new_line(
- sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK
-) -> None:
+async def test_data_json_escaped_double_new_line(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b'data: {"foo": "my long\\n\\ncontent"}'
@@ -157,9 +145,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_data_lines(
- sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK
-) -> None:
+async def test_multiple_data_lines(sync: bool, client: Gradient, async_client: AsyncGradient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"data: {\n"
@@ -179,8 +165,8 @@ def body() -> Iterator[bytes]:
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
async def test_special_new_line_character(
sync: bool,
- client: DigitaloceanGenaiSDK,
- async_client: AsyncDigitaloceanGenaiSDK,
+ client: Gradient,
+ async_client: AsyncGradient,
) -> None:
def body() -> Iterator[bytes]:
yield b'data: {"content":" culpa"}\n'
@@ -210,8 +196,8 @@ def body() -> Iterator[bytes]:
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
async def test_multi_byte_character_multiple_chunks(
sync: bool,
- client: DigitaloceanGenaiSDK,
- async_client: AsyncDigitaloceanGenaiSDK,
+ client: Gradient,
+ async_client: AsyncGradient,
) -> None:
def body() -> Iterator[bytes]:
yield b'data: {"content":"'
@@ -251,8 +237,8 @@ def make_event_iterator(
content: Iterator[bytes],
*,
sync: bool,
- client: DigitaloceanGenaiSDK,
- async_client: AsyncDigitaloceanGenaiSDK,
+ client: Gradient,
+ async_client: AsyncGradient,
) -> Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]:
if sync:
return Stream(cast_to=object, client=client, response=httpx.Response(200, content=content))._iter_events()
diff --git a/tests/test_transform.py b/tests/test_transform.py
index 3c29084e..098015a9 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -8,15 +8,15 @@
import pytest
-from digitalocean_genai_sdk._types import NOT_GIVEN, Base64FileInput
-from digitalocean_genai_sdk._utils import (
+from gradient._types import Base64FileInput, omit, not_given
+from gradient._utils import (
PropertyInfo,
transform as _transform,
parse_datetime,
async_transform as _async_transform,
)
-from digitalocean_genai_sdk._compat import PYDANTIC_V2
-from digitalocean_genai_sdk._models import BaseModel
+from gradient._compat import PYDANTIC_V1
+from gradient._models import BaseModel
_T = TypeVar("_T")
@@ -189,7 +189,7 @@ class DateModel(BaseModel):
@pytest.mark.asyncio
async def test_iso8601_format(use_async: bool) -> None:
dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00")
- tz = "Z" if PYDANTIC_V2 else "+00:00"
+ tz = "+00:00" if PYDANTIC_V1 else "Z"
assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap]
assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap]
@@ -297,11 +297,11 @@ async def test_pydantic_unknown_field(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_pydantic_mismatched_types(use_async: bool) -> None:
model = MyModel.construct(foo=True)
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ params = await transform(model, Any, use_async)
+ else:
with pytest.warns(UserWarning):
params = await transform(model, Any, use_async)
- else:
- params = await transform(model, Any, use_async)
assert cast(Any, params) == {"foo": True}
@@ -309,11 +309,11 @@ async def test_pydantic_mismatched_types(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_pydantic_mismatched_object_type(use_async: bool) -> None:
model = MyModel.construct(foo=MyModel.construct(hello="world"))
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ params = await transform(model, Any, use_async)
+ else:
with pytest.warns(UserWarning):
params = await transform(model, Any, use_async)
- else:
- params = await transform(model, Any, use_async)
assert cast(Any, params) == {"foo": {"hello": "world"}}
@@ -450,4 +450,11 @@ async def test_transform_skipping(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_strips_notgiven(use_async: bool) -> None:
assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
- assert await transform({"foo_bar": NOT_GIVEN}, Foo1, use_async) == {}
+ assert await transform({"foo_bar": not_given}, Foo1, use_async) == {}
+
+
+@parametrize
+@pytest.mark.asyncio
+async def test_strips_omit(use_async: bool) -> None:
+ assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
+ assert await transform({"foo_bar": omit}, Foo1, use_async) == {}
diff --git a/tests/test_utils/test_datetime_parse.py b/tests/test_utils/test_datetime_parse.py
new file mode 100644
index 00000000..6cbb1b6f
--- /dev/null
+++ b/tests/test_utils/test_datetime_parse.py
@@ -0,0 +1,110 @@
+"""
+Copied from https://github.com/pydantic/pydantic/blob/v1.10.22/tests/test_datetime_parse.py
+with modifications so it works without pydantic v1 imports.
+"""
+
+from typing import Type, Union
+from datetime import date, datetime, timezone, timedelta
+
+import pytest
+
+from gradient._utils import parse_date, parse_datetime
+
+
+def create_tz(minutes: int) -> timezone:
+ return timezone(timedelta(minutes=minutes))
+
+
+@pytest.mark.parametrize(
+ "value,result",
+ [
+ # Valid inputs
+ ("1494012444.883309", date(2017, 5, 5)),
+ (b"1494012444.883309", date(2017, 5, 5)),
+ (1_494_012_444.883_309, date(2017, 5, 5)),
+ ("1494012444", date(2017, 5, 5)),
+ (1_494_012_444, date(2017, 5, 5)),
+ (0, date(1970, 1, 1)),
+ ("2012-04-23", date(2012, 4, 23)),
+ (b"2012-04-23", date(2012, 4, 23)),
+ ("2012-4-9", date(2012, 4, 9)),
+ (date(2012, 4, 9), date(2012, 4, 9)),
+ (datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
+ # Invalid inputs
+ ("x20120423", ValueError),
+ ("2012-04-56", ValueError),
+ (19_999_999_999, date(2603, 10, 11)), # just before watershed
+ (20_000_000_001, date(1970, 8, 20)), # just after watershed
+ (1_549_316_052, date(2019, 2, 4)), # nowish in s
+ (1_549_316_052_104, date(2019, 2, 4)), # nowish in ms
+ (1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs
+ (1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns
+ ("infinity", date(9999, 12, 31)),
+ ("inf", date(9999, 12, 31)),
+ (float("inf"), date(9999, 12, 31)),
+ ("infinity ", date(9999, 12, 31)),
+ (int("1" + "0" * 100), date(9999, 12, 31)),
+ (1e1000, date(9999, 12, 31)),
+ ("-infinity", date(1, 1, 1)),
+ ("-inf", date(1, 1, 1)),
+ ("nan", ValueError),
+ ],
+)
+def test_date_parsing(value: Union[str, bytes, int, float], result: Union[date, Type[Exception]]) -> None:
+ if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
+ with pytest.raises(result):
+ parse_date(value)
+ else:
+ assert parse_date(value) == result
+
+
+@pytest.mark.parametrize(
+ "value,result",
+ [
+ # Valid inputs
+ # values in seconds
+ ("1494012444.883309", datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
+ (1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
+ ("1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ (b"1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ (1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ # values in ms
+ ("1494012444000.883309", datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
+ ("-1494012444000.883309", datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
+ (1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ ("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)),
+ ("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)),
+ ("2012-04-23T09:15:00Z", datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
+ ("2012-4-9 4:8:16-0320", datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
+ ("2012-04-23T10:20:30.400+02:30", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
+ ("2012-04-23T10:20:30.400+02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
+ ("2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
+ (b"2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
+ (datetime(2017, 5, 5), datetime(2017, 5, 5)),
+ (0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
+ # Invalid inputs
+ ("x20120423091500", ValueError),
+ ("2012-04-56T09:15:90", ValueError),
+ ("2012-04-23T11:05:00-25:00", ValueError),
+ (19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed
+ (20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed
+ (1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s
+ (1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms
+ (1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs
+ (1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns
+ ("infinity", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("inf", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("inf ", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ (1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ (float("inf"), datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("-infinity", datetime(1, 1, 1, 0, 0)),
+ ("-inf", datetime(1, 1, 1, 0, 0)),
+ ("nan", ValueError),
+ ],
+)
+def test_datetime_parsing(value: Union[str, bytes, int, float], result: Union[datetime, Type[Exception]]) -> None:
+ if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
+ with pytest.raises(result):
+ parse_datetime(value)
+ else:
+ assert parse_datetime(value) == result
diff --git a/tests/test_utils/test_json.py b/tests/test_utils/test_json.py
new file mode 100644
index 00000000..4ba6d83c
--- /dev/null
+++ b/tests/test_utils/test_json.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+import datetime
+from typing import Union
+
+import pydantic
+
+from gradient import _compat
+from gradient._utils._json import openapi_dumps
+
+
+class TestOpenapiDumps:
+ def test_basic(self) -> None:
+ data = {"key": "value", "number": 42}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"key":"value","number":42}'
+
+ def test_datetime_serialization(self) -> None:
+ dt = datetime.datetime(2023, 1, 1, 12, 0, 0)
+ data = {"datetime": dt}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"datetime":"2023-01-01T12:00:00"}'
+
+ def test_pydantic_model_serialization(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str
+ last_name: str
+ age: int
+
+ model_instance = User(first_name="John", last_name="Kramer", age=83)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"first_name":"John","last_name":"Kramer","age":83}}'
+
+ def test_pydantic_model_with_default_values(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+ score: int = 0
+
+ model_instance = User(name="Alice")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Alice"}}'
+
+ def test_pydantic_model_with_default_values_overridden(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+
+ model_instance = User(name="Bob", role="admin", active=False)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Bob","role":"admin","active":false}}'
+
+ def test_pydantic_model_with_alias(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str = pydantic.Field(alias="firstName")
+ last_name: str = pydantic.Field(alias="lastName")
+
+ model_instance = User(firstName="John", lastName="Doe")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"firstName":"John","lastName":"Doe"}}'
+
+ def test_pydantic_model_with_alias_and_default(self) -> None:
+ class User(pydantic.BaseModel):
+ user_name: str = pydantic.Field(alias="userName")
+ user_role: str = pydantic.Field(default="member", alias="userRole")
+ is_active: bool = pydantic.Field(default=True, alias="isActive")
+
+ model_instance = User(userName="charlie")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"charlie"}}'
+
+ model_with_overrides = User(userName="diana", userRole="admin", isActive=False)
+ data = {"model": model_with_overrides}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"diana","userRole":"admin","isActive":false}}'
+
+ def test_pydantic_model_with_nested_models_and_defaults(self) -> None:
+ class Address(pydantic.BaseModel):
+ street: str
+ city: str = "Unknown"
+
+ class User(pydantic.BaseModel):
+ name: str
+ address: Address
+ verified: bool = False
+
+ if _compat.PYDANTIC_V1:
+ # to handle forward references in Pydantic v1
+ User.update_forward_refs(**locals()) # type: ignore[reportDeprecated]
+
+ address = Address(street="123 Main St")
+ user = User(name="Diana", address=address)
+ data = {"user": user}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"user":{"name":"Diana","address":{"street":"123 Main St"}}}'
+
+ address_with_city = Address(street="456 Oak Ave", city="Boston")
+ user_verified = User(name="Eve", address=address_with_city, verified=True)
+ data = {"user": user_verified}
+ json_bytes = openapi_dumps(data)
+ assert (
+ json_bytes == b'{"user":{"name":"Eve","address":{"street":"456 Oak Ave","city":"Boston"},"verified":true}}'
+ )
+
+ def test_pydantic_model_with_optional_fields(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ email: Union[str, None]
+ phone: Union[str, None]
+
+ model_with_none = User(name="Eve", email=None, phone=None)
+ data = {"model": model_with_none}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Eve","email":null,"phone":null}}'
+
+ model_with_values = User(name="Frank", email="frank@example.com", phone=None)
+ data = {"model": model_with_values}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Frank","email":"frank@example.com","phone":null}}'
diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py
index 6fe8c808..af6d092a 100644
--- a/tests/test_utils/test_proxy.py
+++ b/tests/test_utils/test_proxy.py
@@ -2,7 +2,7 @@
from typing import Any
from typing_extensions import override
-from digitalocean_genai_sdk._utils import LazyProxy
+from gradient._utils import LazyProxy
class RecursiveLazyProxy(LazyProxy[Any]):
diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py
index 72bf3422..5f9711a2 100644
--- a/tests/test_utils/test_typing.py
+++ b/tests/test_utils/test_typing.py
@@ -2,7 +2,7 @@
from typing import Generic, TypeVar, cast
-from digitalocean_genai_sdk._utils import extract_type_var_from_base
+from gradient._utils import extract_type_var_from_base
_T = TypeVar("_T")
_T2 = TypeVar("_T2")
diff --git a/tests/utils.py b/tests/utils.py
index e795e2e8..8d9112d6 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -4,22 +4,23 @@
import inspect
import traceback
import contextlib
-from typing import Any, TypeVar, Iterator, cast
+from typing import Any, TypeVar, Iterator, Sequence, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, get_origin, assert_type
-from digitalocean_genai_sdk._types import Omit, NoneType
-from digitalocean_genai_sdk._utils import (
+from gradient._types import Omit, NoneType
+from gradient._utils import (
is_dict,
is_list,
is_list_type,
is_union_type,
extract_type_arg,
+ is_sequence_type,
is_annotated_type,
is_type_alias_type,
)
-from digitalocean_genai_sdk._compat import PYDANTIC_V2, field_outer_type, get_model_fields
-from digitalocean_genai_sdk._models import BaseModel
+from gradient._compat import PYDANTIC_V1, field_outer_type, get_model_fields
+from gradient._models import BaseModel
BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
@@ -27,12 +28,12 @@
def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool:
for name, field in get_model_fields(model).items():
field_value = getattr(value, name)
- if PYDANTIC_V2:
- allow_none = False
- else:
+ if PYDANTIC_V1:
# in v1 nullability was structured differently
# https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields
allow_none = getattr(field, "allow_none", False)
+ else:
+ allow_none = False
assert_matches_type(
field_outer_type(field),
@@ -71,6 +72,13 @@ def assert_matches_type(
if is_list_type(type_):
return _assert_list_type(type_, value)
+ if is_sequence_type(type_):
+ assert isinstance(value, Sequence)
+ inner_type = get_args(type_)[0]
+ for entry in value: # type: ignore
+ assert_type(inner_type, entry) # type: ignore
+ return
+
if origin == str:
assert isinstance(value, str)
elif origin == int: