From 335679e12bd1c622e00d5a0fbd7f6ab1f8f9caec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:51:33 +0200 Subject: [PATCH 01/25] chore(deps): bump jinja2 from 3.1.2 to 3.1.3 in /docs (#472) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/poetry.lock | 43 +++++-------------------------------------- 1 file changed, 5 insertions(+), 38 deletions(-) diff --git a/docs/poetry.lock b/docs/poetry.lock index fdd9002e4..2beed55bf 100644 --- a/docs/poetry.lock +++ b/docs/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "alabaster" version = "0.7.13" description = "A configurable sidebar-enabled Sphinx theme" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "babel" version = "2.13.1" description = "Internationalization utilities" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -35,7 +33,6 @@ dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] name = "certifi" version = "2023.11.17" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -47,7 +44,6 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -147,7 +143,6 @@ files = [ name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -159,7 +154,6 @@ files = [ name = "docutils" version = "0.18.1" description = "Docutils -- Python Documentation Utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -171,7 +165,6 @@ files = [ name = "idna" version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -183,7 +176,6 @@ files = [ name = "imagesize" version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -195,7 +187,6 @@ files = [ name = "importlib-metadata" version = "6.8.0" description = "Read metadata from Python packages" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -213,14 +204,13 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs [[package]] name = "jinja2" -version = "3.1.2" +version = "3.1.3" description = "A very fast and expressive template engine." 
-category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, ] [package.dependencies] @@ -233,7 +223,6 @@ i18n = ["Babel (>=2.7)"] name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -303,7 +292,6 @@ files = [ name = "packaging" version = "23.2" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -315,7 +303,6 @@ files = [ name = "pygments" version = "2.17.2" description = "Pygments is a syntax highlighting package written in Python." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -331,7 +318,6 @@ windows-terminal = ["colorama (>=0.4.6)"] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -346,7 +332,6 @@ six = ">=1.5" name = "pytz" version = "2023.3.post1" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -358,7 +343,6 @@ files = [ name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -380,6 +364,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -418,7 +403,6 @@ files = [ name = "requests" version = "2.31.0" description = "Python HTTP for Humans." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -440,7 +424,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "scaleway" version = "2.0.0" description = "Scaleway SDK for Python" -category = "main" optional = false python-versions = "^3.8" files = [] @@ -457,7 +440,6 @@ url = "../scaleway" name = "scaleway-async" version = "2.0.0" description = "Scaleway SDK for Python" -category = "main" optional = false python-versions = "^3.8" files = [] @@ -474,7 +456,6 @@ url = "../scaleway-async" name = "scaleway-core" version = "2.0.0" description = "Scaleway SDK for Python" -category = "main" optional = false python-versions = "^3.8" files = [] @@ -493,7 +474,6 @@ url = "../scaleway-core" name = "setuptools" version = "69.0.2" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -510,7 +490,6 @@ testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jar name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -522,7 +501,6 @@ files = [ name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -category = "main" optional = false python-versions = "*" files = [ @@ -534,7 +512,6 @@ files = [ name = "sphinx" version = "5.3.0" description = "Python documentation generator" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -570,7 +547,6 @@ test = ["cython", "html5lib", "pytest (>=4.6)", "typed_ast"] name = "sphinx-rtd-theme" version = "1.3.0" description = "Read the Docs theme for Sphinx" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -590,7 +566,6 @@ dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"] name = "sphinxcontrib-applehelp" version = "1.0.4" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -606,7 +581,6 @@ test = ["pytest"] name = "sphinxcontrib-devhelp" version = "1.0.2" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -622,7 +596,6 @@ test = ["pytest"] name = "sphinxcontrib-htmlhelp" version = "2.0.1" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -638,7 +611,6 @@ test = ["html5lib", "pytest"] name = "sphinxcontrib-jquery" version = "4.1" description = "Extension to include jQuery on newer Sphinx releases" -category = "main" optional = false python-versions = ">=2.7" files = [ @@ -653,7 +625,6 @@ Sphinx = ">=1.8" name = "sphinxcontrib-jsmath" version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -668,7 +639,6 @@ test = ["flake8", "mypy", "pytest"] name = "sphinxcontrib-qthelp" version = "1.0.3" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." 
-category = "main" optional = false python-versions = ">=3.5" files = [ @@ -684,7 +654,6 @@ test = ["pytest"] name = "sphinxcontrib-serializinghtml" version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -700,7 +669,6 @@ test = ["pytest"] name = "urllib3" version = "2.1.0" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -717,7 +685,6 @@ zstd = ["zstandard (>=0.18.0)"] name = "zipp" version = "3.17.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" optional = false python-versions = ">=3.8" files = [ From a42056ac71965b2ff8117c471495bcf73fc0df6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20L=C3=A9one?= Date: Tue, 2 Apr 2024 16:34:04 +0200 Subject: [PATCH 02/25] chore: add support for dependabot (pip, github-actions) (#473) --- .github/dependabot.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..437932a60 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,21 @@ +version: 2 +updates: + - package-ecosystem: pip + directory: /scaleway + schedule: + interval: monthly + + - package-ecosystem: pip + directory: /scaleway-async + schedule: + interval: monthly + + - package-ecosystem: pip + directory: /scaleway-core + schedule: + interval: monthly + + - package-ecosystem: github-actions + directory: / + schedule: + interval: monthly From 9fa2dfb7ea0b9d2dac661fbed862303ab6065818 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Tue, 2 Apr 2024 16:50:50 +0200 Subject: [PATCH 03/25] feat(iam): allow filtering by name in ListQuotas (#474) --- scaleway-async/scaleway_async/iam/v1alpha1/api.py | 6 ++++++ scaleway-async/scaleway_async/iam/v1alpha1/types.py | 5 +++++ scaleway/scaleway/iam/v1alpha1/api.py | 6 ++++++ scaleway/scaleway/iam/v1alpha1/types.py | 5 +++++ 4 files changed, 22 insertions(+) diff --git a/scaleway-async/scaleway_async/iam/v1alpha1/api.py b/scaleway-async/scaleway_async/iam/v1alpha1/api.py index fd9af2346..88f949da1 100644 --- a/scaleway-async/scaleway_async/iam/v1alpha1/api.py +++ b/scaleway-async/scaleway_async/iam/v1alpha1/api.py @@ -2018,6 +2018,7 @@ async def list_quota( page_size: Optional[int] = None, page: Optional[int] = None, organization_id: Optional[str] = None, + quotum_names: Optional[List[str]] = None, ) -> ListQuotaResponse: """ List all quotas in the Organization. @@ -2026,6 +2027,7 @@ async def list_quota( :param page_size: Number of results per page. Value must be between 1 and 100. :param page: Page number. Value must be greater than 1. :param organization_id: Filter by Organization ID. + :param quotum_names: List of quotum names to filter from. :return: :class:`ListQuotaResponse ` Usage: @@ -2043,6 +2045,7 @@ async def list_quota( or self.client.default_organization_id, "page": page, "page_size": page_size or self.client.default_page_size, + "quotum_names": quotum_names, }, ) @@ -2056,6 +2059,7 @@ async def list_quota_all( page_size: Optional[int] = None, page: Optional[int] = None, organization_id: Optional[str] = None, + quotum_names: Optional[List[str]] = None, ) -> List[Quotum]: """ List all quotas in the Organization. 
@@ -2064,6 +2068,7 @@ async def list_quota_all( :param page_size: Number of results per page. Value must be between 1 and 100. :param page: Page number. Value must be greater than 1. :param organization_id: Filter by Organization ID. + :param quotum_names: List of quotum names to filter from. :return: :class:`List[Quotum] ` Usage: @@ -2081,6 +2086,7 @@ async def list_quota_all( "page_size": page_size, "page": page, "organization_id": organization_id, + "quotum_names": quotum_names, }, ) diff --git a/scaleway-async/scaleway_async/iam/v1alpha1/types.py b/scaleway-async/scaleway_async/iam/v1alpha1/types.py index 000c6d97e..7cedeaac2 100644 --- a/scaleway-async/scaleway_async/iam/v1alpha1/types.py +++ b/scaleway-async/scaleway_async/iam/v1alpha1/types.py @@ -1436,6 +1436,11 @@ class ListQuotaRequest: Filter by Organization ID. """ + quotum_names: Optional[List[str]] + """ + List of quotum names to filter from. + """ + @dataclass class ListQuotaResponse: diff --git a/scaleway/scaleway/iam/v1alpha1/api.py b/scaleway/scaleway/iam/v1alpha1/api.py index b67f7a3fe..b50d33dab 100644 --- a/scaleway/scaleway/iam/v1alpha1/api.py +++ b/scaleway/scaleway/iam/v1alpha1/api.py @@ -2018,6 +2018,7 @@ def list_quota( page_size: Optional[int] = None, page: Optional[int] = None, organization_id: Optional[str] = None, + quotum_names: Optional[List[str]] = None, ) -> ListQuotaResponse: """ List all quotas in the Organization. @@ -2026,6 +2027,7 @@ def list_quota( :param page_size: Number of results per page. Value must be between 1 and 100. :param page: Page number. Value must be greater than 1. :param organization_id: Filter by Organization ID. + :param quotum_names: List of quotum names to filter from. :return: :class:`ListQuotaResponse ` Usage: @@ -2043,6 +2045,7 @@ def list_quota( or self.client.default_organization_id, "page": page, "page_size": page_size or self.client.default_page_size, + "quotum_names": quotum_names, }, ) @@ -2056,6 +2059,7 @@ def list_quota_all( page_size: Optional[int] = None, page: Optional[int] = None, organization_id: Optional[str] = None, + quotum_names: Optional[List[str]] = None, ) -> List[Quotum]: """ List all quotas in the Organization. @@ -2064,6 +2068,7 @@ def list_quota_all( :param page_size: Number of results per page. Value must be between 1 and 100. :param page: Page number. Value must be greater than 1. :param organization_id: Filter by Organization ID. + :param quotum_names: List of quotum names to filter from. :return: :class:`List[Quotum] ` Usage: @@ -2081,6 +2086,7 @@ def list_quota_all( "page_size": page_size, "page": page, "organization_id": organization_id, + "quotum_names": quotum_names, }, ) diff --git a/scaleway/scaleway/iam/v1alpha1/types.py b/scaleway/scaleway/iam/v1alpha1/types.py index 000c6d97e..7cedeaac2 100644 --- a/scaleway/scaleway/iam/v1alpha1/types.py +++ b/scaleway/scaleway/iam/v1alpha1/types.py @@ -1436,6 +1436,11 @@ class ListQuotaRequest: Filter by Organization ID. """ + quotum_names: Optional[List[str]] + """ + List of quotum names to filter from. 
+ """ + @dataclass class ListQuotaResponse: From d49696cd67c8a132ccbaa10c6c24ee8f333594ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:51:30 +0200 Subject: [PATCH 04/25] chore(deps): bump actions/checkout from 3 to 4 (#475) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/checks.yml | 8 ++++---- .github/workflows/docs.yml | 2 +- .github/workflows/release.yml | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index f86a481c7..6ab0ebd5a 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -19,7 +19,7 @@ jobs: run: working-directory: ${{ matrix.lib }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4 with: @@ -45,7 +45,7 @@ jobs: run: working-directory: ${{ matrix.lib }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4 with: @@ -71,7 +71,7 @@ jobs: run: working-directory: ${{ matrix.lib }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4 with: @@ -100,7 +100,7 @@ jobs: # run: # working-directory: ${{ matrix.lib }} # steps: - # - uses: actions/checkout@v3 + # - uses: actions/checkout@v4 # - name: Set up Python # uses: actions/setup-python@v4 # with: diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a7bfe3185..7107b0ba8 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -15,7 +15,7 @@ jobs: run: working-directory: ./docs steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0617bb882..09f432a7b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -11,7 +11,7 @@ jobs: run: working-directory: scaleway-core steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4 with: @@ -48,7 +48,7 @@ jobs: run: working-directory: ${{ matrix.lib }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4 with: From e4c959cd9b30f301b74f26dbd3e483f2e306555e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 17:07:10 +0200 Subject: [PATCH 05/25] chore(deps): bump actions/setup-python from 4 to 5 (#476) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/checks.yml | 8 ++++---- .github/workflows/docs.yml | 2 +- .github/workflows/release.yml | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 6ab0ebd5a..81e276252 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -21,7 +21,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.8 - name: Install poetry @@ -47,7 +47,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.8 - name: 
Install poetry @@ -73,7 +73,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.8 - name: Install poetry @@ -102,7 +102,7 @@ jobs: # steps: # - uses: actions/checkout@v4 # - name: Set up Python - # uses: actions/setup-python@v4 + # uses: actions/setup-python@v5 # with: # python-version: 3.8 # - name: Install poetry diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 7107b0ba8..5d911534a 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -17,7 +17,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.8 - name: Install poetry diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 09f432a7b..eb1e27ce5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.8 - name: Install poetry @@ -50,7 +50,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.8 - name: Install poetry From 6f5f6b22abb0bd298d4cf6c98cb870f6e1938ce6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 17:08:25 +0200 Subject: [PATCH 06/25] chore(deps-dev): bump ruff from 0.0.286 to 0.3.5 in /scaleway-async (#477) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- scaleway-async/poetry.lock | 56 +++++++++++++---------------------- scaleway-async/pyproject.toml | 2 +- 2 files changed, 22 insertions(+), 36 deletions(-) diff --git a/scaleway-async/poetry.lock b/scaleway-async/poetry.lock index ac19aa15d..6c989c1b3 100644 --- a/scaleway-async/poetry.lock +++ b/scaleway-async/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "certifi" version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -116,7 +114,6 @@ files = [ name = "idna" version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -128,7 +125,6 @@ files = [ name = "mypy" version = "1.9.0" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -176,7 +172,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
-category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -188,7 +183,6 @@ files = [ name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -203,7 +197,6 @@ six = ">=1.5" name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -264,7 +257,6 @@ files = [ name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -284,36 +276,34 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.0.286" -description = "An extremely fast Python linter, written in Rust." -category = "dev" +version = "0.3.5" +description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.286-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:8e22cb557e7395893490e7f9cfea1073d19a5b1dd337f44fd81359b2767da4e9"}, - {file = "ruff-0.0.286-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:68ed8c99c883ae79a9133cb1a86d7130feee0397fdf5ba385abf2d53e178d3fa"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8301f0bb4ec1a5b29cfaf15b83565136c47abefb771603241af9d6038f8981e8"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acc4598f810bbc465ce0ed84417ac687e392c993a84c7eaf3abf97638701c1ec"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88c8e358b445eb66d47164fa38541cfcc267847d1e7a92dd186dddb1a0a9a17f"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0433683d0c5dbcf6162a4beb2356e820a593243f1fa714072fec15e2e4f4c939"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddb61a0c4454cbe4623f4a07fef03c5ae921fe04fede8d15c6e36703c0a73b07"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47549c7c0be24c8ae9f2bce6f1c49fbafea83bca80142d118306f08ec7414041"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:559aa793149ac23dc4310f94f2c83209eedb16908a0343663be19bec42233d25"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d73cfb1c3352e7aa0ce6fb2321f36fa1d4a2c48d2ceac694cb03611ddf0e4db6"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3dad93b1f973c6d1db4b6a5da8690c5625a3fa32bdf38e543a6936e634b83dc3"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26afc0851f4fc3738afcf30f5f8b8612a31ac3455cb76e611deea80f5c0bf3ce"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9b6b116d1c4000de1b9bf027131dbc3b8a70507788f794c6b09509d28952c512"}, - {file = "ruff-0.0.286-py3-none-win32.whl", hash = "sha256:556e965ac07c1e8c1c2d759ac512e526ecff62c00fde1a046acb088d3cbc1a6c"}, - {file = "ruff-0.0.286-py3-none-win_amd64.whl", hash = "sha256:5d295c758961376c84aaa92d16e643d110be32add7465e197bfdaec5a431a107"}, - {file = "ruff-0.0.286-py3-none-win_arm64.whl", hash = "sha256:1d6142d53ab7f164204b3133d053c4958d4d11ec3a39abf23a40b13b0784e3f0"}, - {file = "ruff-0.0.286.tar.gz", hash = 
"sha256:f1e9d169cce81a384a26ee5bb8c919fe9ae88255f39a1a69fd1ebab233a85ed2"}, + {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:aef5bd3b89e657007e1be6b16553c8813b221ff6d92c7526b7e0227450981eac"}, + {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:89b1e92b3bd9fca249153a97d23f29bed3992cff414b222fcd361d763fc53f12"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e55771559c89272c3ebab23326dc23e7f813e492052391fe7950c1a5a139d89"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dabc62195bf54b8a7876add6e789caae0268f34582333cda340497c886111c39"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a05f3793ba25f194f395578579c546ca5d83e0195f992edc32e5907d142bfa3"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dfd3504e881082959b4160ab02f7a205f0fadc0a9619cc481982b6837b2fd4c0"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87258e0d4b04046cf1d6cc1c56fadbf7a880cc3de1f7294938e923234cf9e498"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:712e71283fc7d9f95047ed5f793bc019b0b0a29849b14664a60fd66c23b96da1"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532a90b4a18d3f722c124c513ffb5e5eaff0cc4f6d3aa4bda38e691b8600c9f"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:122de171a147c76ada00f76df533b54676f6e321e61bd8656ae54be326c10296"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d80a6b18a6c3b6ed25b71b05eba183f37d9bc8b16ace9e3d700997f00b74660b"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a7b6e63194c68bca8e71f81de30cfa6f58ff70393cf45aab4c20f158227d5936"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a759d33a20c72f2dfa54dae6e85e1225b8e302e8ac655773aff22e542a300985"}, + {file = "ruff-0.3.5-py3-none-win32.whl", hash = "sha256:9d8605aa990045517c911726d21293ef4baa64f87265896e491a05461cae078d"}, + {file = "ruff-0.3.5-py3-none-win_amd64.whl", hash = "sha256:dc56bb16a63c1303bd47563c60482a1512721053d93231cf7e9e1c6954395a0e"}, + {file = "ruff-0.3.5-py3-none-win_arm64.whl", hash = "sha256:faeeae9905446b975dcf6d4499dc93439b131f1443ee264055c5716dd947af55"}, + {file = "ruff-0.3.5.tar.gz", hash = "sha256:a067daaeb1dc2baf9b82a32dae67d154d95212080c80435eb052d95da647763d"}, ] [[package]] name = "scaleway-core" version = "2.0.0" description = "Scaleway SDK for Python" -category = "main" optional = false python-versions = "^3.8" files = [] @@ -332,7 +322,6 @@ url = "../scaleway-core" name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -344,7 +333,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -356,7 +344,6 @@ files = [ name = "typing-extensions" version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -368,7 +355,6 @@ files = [ name = "urllib3" version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" optional = false python-versions = ">=3.8" files = [ @@ -385,4 +371,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "441285c99087ba7e2715e85918cefdd80615528d46711cb9c85dc8d28e1f308c" +content-hash = "78ba73b892de4af6c5272e846096bb4efee07977266f4e0d2b6d3d4ca9835730" diff --git a/scaleway-async/pyproject.toml b/scaleway-async/pyproject.toml index 56a0a0aae..fbb86190c 100644 --- a/scaleway-async/pyproject.toml +++ b/scaleway-async/pyproject.toml @@ -28,7 +28,7 @@ scaleway-core = "*" [tool.poetry.group.dev.dependencies] scaleway-core = { path = "../scaleway-core", develop = true } -ruff = "^0.0.286" +ruff = ">=0.0.286,<0.3.6" mypy = "^1.5.1" [build-system] From 54263af1eaaefe0dbbc9ab5c9103777807f056ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 17:09:25 +0200 Subject: [PATCH 07/25] chore(deps-dev): bump ruff from 0.0.286 to 0.3.5 in /scaleway-core (#478) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- scaleway-core/poetry.lock | 56 ++++++++++++++---------------------- scaleway-core/pyproject.toml | 2 +- 2 files changed, 22 insertions(+), 36 deletions(-) diff --git a/scaleway-core/poetry.lock b/scaleway-core/poetry.lock index a8d4b38c1..ce223db0e 100644 --- a/scaleway-core/poetry.lock +++ b/scaleway-core/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "certifi" version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -116,7 +114,6 @@ files = [ name = "idna" version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -128,7 +125,6 @@ files = [ name = "mypy" version = "1.9.0" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -176,7 +172,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -188,7 +183,6 @@ files = [ name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -203,7 +197,6 @@ six = ">=1.5" name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -264,7 +257,6 @@ files = [ name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -284,36 +276,34 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.0.286" -description = "An extremely fast Python linter, written in Rust." 
-category = "dev" +version = "0.3.5" +description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.286-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:8e22cb557e7395893490e7f9cfea1073d19a5b1dd337f44fd81359b2767da4e9"}, - {file = "ruff-0.0.286-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:68ed8c99c883ae79a9133cb1a86d7130feee0397fdf5ba385abf2d53e178d3fa"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8301f0bb4ec1a5b29cfaf15b83565136c47abefb771603241af9d6038f8981e8"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acc4598f810bbc465ce0ed84417ac687e392c993a84c7eaf3abf97638701c1ec"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88c8e358b445eb66d47164fa38541cfcc267847d1e7a92dd186dddb1a0a9a17f"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0433683d0c5dbcf6162a4beb2356e820a593243f1fa714072fec15e2e4f4c939"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddb61a0c4454cbe4623f4a07fef03c5ae921fe04fede8d15c6e36703c0a73b07"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47549c7c0be24c8ae9f2bce6f1c49fbafea83bca80142d118306f08ec7414041"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:559aa793149ac23dc4310f94f2c83209eedb16908a0343663be19bec42233d25"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d73cfb1c3352e7aa0ce6fb2321f36fa1d4a2c48d2ceac694cb03611ddf0e4db6"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3dad93b1f973c6d1db4b6a5da8690c5625a3fa32bdf38e543a6936e634b83dc3"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26afc0851f4fc3738afcf30f5f8b8612a31ac3455cb76e611deea80f5c0bf3ce"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9b6b116d1c4000de1b9bf027131dbc3b8a70507788f794c6b09509d28952c512"}, - {file = "ruff-0.0.286-py3-none-win32.whl", hash = "sha256:556e965ac07c1e8c1c2d759ac512e526ecff62c00fde1a046acb088d3cbc1a6c"}, - {file = "ruff-0.0.286-py3-none-win_amd64.whl", hash = "sha256:5d295c758961376c84aaa92d16e643d110be32add7465e197bfdaec5a431a107"}, - {file = "ruff-0.0.286-py3-none-win_arm64.whl", hash = "sha256:1d6142d53ab7f164204b3133d053c4958d4d11ec3a39abf23a40b13b0784e3f0"}, - {file = "ruff-0.0.286.tar.gz", hash = "sha256:f1e9d169cce81a384a26ee5bb8c919fe9ae88255f39a1a69fd1ebab233a85ed2"}, + {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:aef5bd3b89e657007e1be6b16553c8813b221ff6d92c7526b7e0227450981eac"}, + {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:89b1e92b3bd9fca249153a97d23f29bed3992cff414b222fcd361d763fc53f12"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e55771559c89272c3ebab23326dc23e7f813e492052391fe7950c1a5a139d89"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dabc62195bf54b8a7876add6e789caae0268f34582333cda340497c886111c39"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a05f3793ba25f194f395578579c546ca5d83e0195f992edc32e5907d142bfa3"}, + {file = 
"ruff-0.3.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dfd3504e881082959b4160ab02f7a205f0fadc0a9619cc481982b6837b2fd4c0"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87258e0d4b04046cf1d6cc1c56fadbf7a880cc3de1f7294938e923234cf9e498"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:712e71283fc7d9f95047ed5f793bc019b0b0a29849b14664a60fd66c23b96da1"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532a90b4a18d3f722c124c513ffb5e5eaff0cc4f6d3aa4bda38e691b8600c9f"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:122de171a147c76ada00f76df533b54676f6e321e61bd8656ae54be326c10296"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d80a6b18a6c3b6ed25b71b05eba183f37d9bc8b16ace9e3d700997f00b74660b"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a7b6e63194c68bca8e71f81de30cfa6f58ff70393cf45aab4c20f158227d5936"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a759d33a20c72f2dfa54dae6e85e1225b8e302e8ac655773aff22e542a300985"}, + {file = "ruff-0.3.5-py3-none-win32.whl", hash = "sha256:9d8605aa990045517c911726d21293ef4baa64f87265896e491a05461cae078d"}, + {file = "ruff-0.3.5-py3-none-win_amd64.whl", hash = "sha256:dc56bb16a63c1303bd47563c60482a1512721053d93231cf7e9e1c6954395a0e"}, + {file = "ruff-0.3.5-py3-none-win_arm64.whl", hash = "sha256:faeeae9905446b975dcf6d4499dc93439b131f1443ee264055c5716dd947af55"}, + {file = "ruff-0.3.5.tar.gz", hash = "sha256:a067daaeb1dc2baf9b82a32dae67d154d95212080c80435eb052d95da647763d"}, ] [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -325,7 +315,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -337,7 +326,6 @@ files = [ name = "types-python-dateutil" version = "2.9.0.20240316" description = "Typing stubs for python-dateutil" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -349,7 +337,6 @@ files = [ name = "typing-extensions" version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -361,7 +348,6 @@ files = [ name = "urllib3" version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" optional = false python-versions = ">=3.8" files = [ @@ -378,4 +364,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "4af429f24e9bad42a4106375d71a3c54958185e5ef53421c960029977d5fef51" +content-hash = "735616faa9a6cf2c4f576d28e9b152e9d17649e5a36b5cfaf0887c5eb732e856" diff --git a/scaleway-core/pyproject.toml b/scaleway-core/pyproject.toml index 2464c35c5..e05b6148e 100644 --- a/scaleway-core/pyproject.toml +++ b/scaleway-core/pyproject.toml @@ -30,7 +30,7 @@ python-dateutil = "^2.8.2" [tool.poetry.group.dev.dependencies] types-python-dateutil = "^2.8.19" -ruff = "^0.0.286" +ruff = ">=0.0.286,<0.3.6" mypy = "^1.5.1" [build-system] From e0a4cb5c80ee85df49b2d552b5ec043aaba15ccb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 17:10:07 +0200 Subject: [PATCH 08/25] chore(deps-dev): bump ruff from 0.0.286 to 0.3.5 in /scaleway (#479) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- scaleway/poetry.lock | 56 ++++++++++++++++------------------------- scaleway/pyproject.toml | 2 +- 2 files changed, 22 insertions(+), 36 deletions(-) diff --git a/scaleway/poetry.lock b/scaleway/poetry.lock index ac19aa15d..6c989c1b3 100644 --- a/scaleway/poetry.lock +++ b/scaleway/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "certifi" version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "charset-normalizer" version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -116,7 +114,6 @@ files = [ name = "idna" version = "3.6" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -128,7 +125,6 @@ files = [ name = "mypy" version = "1.9.0" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -176,7 +172,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -188,7 +183,6 @@ files = [ name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -203,7 +197,6 @@ six = ">=1.5" name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -264,7 +257,6 @@ files = [ name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -284,36 +276,34 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.0.286" -description = "An extremely fast Python linter, written in Rust." 
-category = "dev" +version = "0.3.5" +description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.286-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:8e22cb557e7395893490e7f9cfea1073d19a5b1dd337f44fd81359b2767da4e9"}, - {file = "ruff-0.0.286-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:68ed8c99c883ae79a9133cb1a86d7130feee0397fdf5ba385abf2d53e178d3fa"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8301f0bb4ec1a5b29cfaf15b83565136c47abefb771603241af9d6038f8981e8"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acc4598f810bbc465ce0ed84417ac687e392c993a84c7eaf3abf97638701c1ec"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88c8e358b445eb66d47164fa38541cfcc267847d1e7a92dd186dddb1a0a9a17f"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0433683d0c5dbcf6162a4beb2356e820a593243f1fa714072fec15e2e4f4c939"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddb61a0c4454cbe4623f4a07fef03c5ae921fe04fede8d15c6e36703c0a73b07"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47549c7c0be24c8ae9f2bce6f1c49fbafea83bca80142d118306f08ec7414041"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:559aa793149ac23dc4310f94f2c83209eedb16908a0343663be19bec42233d25"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d73cfb1c3352e7aa0ce6fb2321f36fa1d4a2c48d2ceac694cb03611ddf0e4db6"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3dad93b1f973c6d1db4b6a5da8690c5625a3fa32bdf38e543a6936e634b83dc3"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26afc0851f4fc3738afcf30f5f8b8612a31ac3455cb76e611deea80f5c0bf3ce"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9b6b116d1c4000de1b9bf027131dbc3b8a70507788f794c6b09509d28952c512"}, - {file = "ruff-0.0.286-py3-none-win32.whl", hash = "sha256:556e965ac07c1e8c1c2d759ac512e526ecff62c00fde1a046acb088d3cbc1a6c"}, - {file = "ruff-0.0.286-py3-none-win_amd64.whl", hash = "sha256:5d295c758961376c84aaa92d16e643d110be32add7465e197bfdaec5a431a107"}, - {file = "ruff-0.0.286-py3-none-win_arm64.whl", hash = "sha256:1d6142d53ab7f164204b3133d053c4958d4d11ec3a39abf23a40b13b0784e3f0"}, - {file = "ruff-0.0.286.tar.gz", hash = "sha256:f1e9d169cce81a384a26ee5bb8c919fe9ae88255f39a1a69fd1ebab233a85ed2"}, + {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:aef5bd3b89e657007e1be6b16553c8813b221ff6d92c7526b7e0227450981eac"}, + {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:89b1e92b3bd9fca249153a97d23f29bed3992cff414b222fcd361d763fc53f12"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e55771559c89272c3ebab23326dc23e7f813e492052391fe7950c1a5a139d89"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dabc62195bf54b8a7876add6e789caae0268f34582333cda340497c886111c39"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a05f3793ba25f194f395578579c546ca5d83e0195f992edc32e5907d142bfa3"}, + {file = 
"ruff-0.3.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dfd3504e881082959b4160ab02f7a205f0fadc0a9619cc481982b6837b2fd4c0"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87258e0d4b04046cf1d6cc1c56fadbf7a880cc3de1f7294938e923234cf9e498"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:712e71283fc7d9f95047ed5f793bc019b0b0a29849b14664a60fd66c23b96da1"}, + {file = "ruff-0.3.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532a90b4a18d3f722c124c513ffb5e5eaff0cc4f6d3aa4bda38e691b8600c9f"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:122de171a147c76ada00f76df533b54676f6e321e61bd8656ae54be326c10296"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d80a6b18a6c3b6ed25b71b05eba183f37d9bc8b16ace9e3d700997f00b74660b"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a7b6e63194c68bca8e71f81de30cfa6f58ff70393cf45aab4c20f158227d5936"}, + {file = "ruff-0.3.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a759d33a20c72f2dfa54dae6e85e1225b8e302e8ac655773aff22e542a300985"}, + {file = "ruff-0.3.5-py3-none-win32.whl", hash = "sha256:9d8605aa990045517c911726d21293ef4baa64f87265896e491a05461cae078d"}, + {file = "ruff-0.3.5-py3-none-win_amd64.whl", hash = "sha256:dc56bb16a63c1303bd47563c60482a1512721053d93231cf7e9e1c6954395a0e"}, + {file = "ruff-0.3.5-py3-none-win_arm64.whl", hash = "sha256:faeeae9905446b975dcf6d4499dc93439b131f1443ee264055c5716dd947af55"}, + {file = "ruff-0.3.5.tar.gz", hash = "sha256:a067daaeb1dc2baf9b82a32dae67d154d95212080c80435eb052d95da647763d"}, ] [[package]] name = "scaleway-core" version = "2.0.0" description = "Scaleway SDK for Python" -category = "main" optional = false python-versions = "^3.8" files = [] @@ -332,7 +322,6 @@ url = "../scaleway-core" name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -344,7 +333,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -356,7 +344,6 @@ files = [ name = "typing-extensions" version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -368,7 +355,6 @@ files = [ name = "urllib3" version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" optional = false python-versions = ">=3.8" files = [ @@ -385,4 +371,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "441285c99087ba7e2715e85918cefdd80615528d46711cb9c85dc8d28e1f308c" +content-hash = "78ba73b892de4af6c5272e846096bb4efee07977266f4e0d2b6d3d4ca9835730" diff --git a/scaleway/pyproject.toml b/scaleway/pyproject.toml index 825cc4ae8..e62da077d 100644 --- a/scaleway/pyproject.toml +++ b/scaleway/pyproject.toml @@ -28,7 +28,7 @@ scaleway-core = "*" [tool.poetry.group.dev.dependencies] scaleway-core = { path = "../scaleway-core", develop = true } -ruff = "^0.0.286" +ruff = ">=0.0.286,<0.3.6" mypy = "^1.5.1" [build-system] From 81c3f47bac5835874a20a54584e976d1612186e9 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 4 Apr 2024 14:39:33 +0200 Subject: [PATCH 09/25] feat(secret_manager): return type on AccessSecretVersion (#480) --- scaleway-async/scaleway_async/secret/v1beta1/api.py | 2 +- .../scaleway_async/secret/v1beta1/marshalling.py | 4 ++++ scaleway-async/scaleway_async/secret/v1beta1/types.py | 9 +++++++-- scaleway/scaleway/secret/v1beta1/api.py | 2 +- scaleway/scaleway/secret/v1beta1/marshalling.py | 4 ++++ scaleway/scaleway/secret/v1beta1/types.py | 9 +++++++-- 6 files changed, 24 insertions(+), 6 deletions(-) diff --git a/scaleway-async/scaleway_async/secret/v1beta1/api.py b/scaleway-async/scaleway_async/secret/v1beta1/api.py index 9f53f77b1..b3e64a57c 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/api.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/api.py @@ -75,7 +75,7 @@ async def create_secret( :param project_id: ID of the Project containing the secret. :param tags: List of the secret's tags. :param description: Description of the secret. - :param type_: (Optional.) See `Secret.Type` enum for description of values. If not specified, the type is `Opaque`. + :param type_: (Optional.) See the `Secret.Type` enum for a description of values. If not specified, the type is `Opaque`. :param path: (Optional.) Location of the secret in the directory structure. If not specified, the path is `/`. :param ephemeral_policy: (Optional.) Policy that defines whether/when a secret's versions expire. By default, the policy is applied to all the secret's versions. :return: :class:`Secret ` diff --git a/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py b/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py index c38b8cbe1..3dfc237a4 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py @@ -206,6 +206,10 @@ def unmarshal_AccessSecretVersionResponse(data: Any) -> AccessSecretVersionRespo if field is not None: args["data"] = field + field = data.get("type_", None) + if field is not None: + args["type_"] = field + field = data.get("data_crc32", None) if field is not None: args["data_crc32"] = field diff --git a/scaleway-async/scaleway_async/secret/v1beta1/types.py b/scaleway-async/scaleway_async/secret/v1beta1/types.py index a03d231f1..fddeb57a2 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/types.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/types.py @@ -256,7 +256,7 @@ class Secret: type_: SecretType """ - See `Secret.Type` enum for description of values. + See the `Secret.Type` enum for a description of values. """ path: str @@ -318,6 +318,11 @@ class AccessSecretVersionResponse: The base64-encoded secret payload of the version. 
""" + type_: SecretType + """ + See the `Secret.Type` enum for a description of values. + """ + data_crc32: Optional[int] """ This field is only available if a CRC32 was supplied during the creation of the version. @@ -423,7 +428,7 @@ class CreateSecretRequest: type_: Optional[SecretType] """ - (Optional.) See `Secret.Type` enum for description of values. If not specified, the type is `Opaque`. + (Optional.) See the `Secret.Type` enum for a description of values. If not specified, the type is `Opaque`. """ path: Optional[str] diff --git a/scaleway/scaleway/secret/v1beta1/api.py b/scaleway/scaleway/secret/v1beta1/api.py index 172561c01..f7c2eba91 100644 --- a/scaleway/scaleway/secret/v1beta1/api.py +++ b/scaleway/scaleway/secret/v1beta1/api.py @@ -75,7 +75,7 @@ def create_secret( :param project_id: ID of the Project containing the secret. :param tags: List of the secret's tags. :param description: Description of the secret. - :param type_: (Optional.) See `Secret.Type` enum for description of values. If not specified, the type is `Opaque`. + :param type_: (Optional.) See the `Secret.Type` enum for a description of values. If not specified, the type is `Opaque`. :param path: (Optional.) Location of the secret in the directory structure. If not specified, the path is `/`. :param ephemeral_policy: (Optional.) Policy that defines whether/when a secret's versions expire. By default, the policy is applied to all the secret's versions. :return: :class:`Secret ` diff --git a/scaleway/scaleway/secret/v1beta1/marshalling.py b/scaleway/scaleway/secret/v1beta1/marshalling.py index c38b8cbe1..3dfc237a4 100644 --- a/scaleway/scaleway/secret/v1beta1/marshalling.py +++ b/scaleway/scaleway/secret/v1beta1/marshalling.py @@ -206,6 +206,10 @@ def unmarshal_AccessSecretVersionResponse(data: Any) -> AccessSecretVersionRespo if field is not None: args["data"] = field + field = data.get("type_", None) + if field is not None: + args["type_"] = field + field = data.get("data_crc32", None) if field is not None: args["data_crc32"] = field diff --git a/scaleway/scaleway/secret/v1beta1/types.py b/scaleway/scaleway/secret/v1beta1/types.py index a03d231f1..fddeb57a2 100644 --- a/scaleway/scaleway/secret/v1beta1/types.py +++ b/scaleway/scaleway/secret/v1beta1/types.py @@ -256,7 +256,7 @@ class Secret: type_: SecretType """ - See `Secret.Type` enum for description of values. + See the `Secret.Type` enum for a description of values. """ path: str @@ -318,6 +318,11 @@ class AccessSecretVersionResponse: The base64-encoded secret payload of the version. """ + type_: SecretType + """ + See the `Secret.Type` enum for a description of values. + """ + data_crc32: Optional[int] """ This field is only available if a CRC32 was supplied during the creation of the version. @@ -423,7 +428,7 @@ class CreateSecretRequest: type_: Optional[SecretType] """ - (Optional.) See `Secret.Type` enum for description of values. If not specified, the type is `Opaque`. + (Optional.) See the `Secret.Type` enum for a description of values. If not specified, the type is `Opaque`. 
""" path: Optional[str] From 6db93456f05c7b2f31c226668db16a18d97a954b Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 4 Apr 2024 14:43:18 +0200 Subject: [PATCH 10/25] feat(dedibox): add SDK support (#481) --- .../scaleway_async/dedibox/__init__.py | 2 + .../scaleway_async/dedibox/v1/__init__.py | 541 ++ .../scaleway_async/dedibox/v1/api.py | 4727 +++++++++++++++++ .../scaleway_async/dedibox/v1/content.py | 86 + .../scaleway_async/dedibox/v1/marshalling.py | 3523 ++++++++++++ .../scaleway_async/dedibox/v1/types.py | 4452 ++++++++++++++++ scaleway/scaleway/dedibox/__init__.py | 2 + scaleway/scaleway/dedibox/v1/__init__.py | 541 ++ scaleway/scaleway/dedibox/v1/api.py | 4721 ++++++++++++++++ scaleway/scaleway/dedibox/v1/content.py | 86 + scaleway/scaleway/dedibox/v1/marshalling.py | 3523 ++++++++++++ scaleway/scaleway/dedibox/v1/types.py | 4452 ++++++++++++++++ 12 files changed, 26656 insertions(+) create mode 100644 scaleway-async/scaleway_async/dedibox/__init__.py create mode 100644 scaleway-async/scaleway_async/dedibox/v1/__init__.py create mode 100644 scaleway-async/scaleway_async/dedibox/v1/api.py create mode 100644 scaleway-async/scaleway_async/dedibox/v1/content.py create mode 100644 scaleway-async/scaleway_async/dedibox/v1/marshalling.py create mode 100644 scaleway-async/scaleway_async/dedibox/v1/types.py create mode 100644 scaleway/scaleway/dedibox/__init__.py create mode 100644 scaleway/scaleway/dedibox/v1/__init__.py create mode 100644 scaleway/scaleway/dedibox/v1/api.py create mode 100644 scaleway/scaleway/dedibox/v1/content.py create mode 100644 scaleway/scaleway/dedibox/v1/marshalling.py create mode 100644 scaleway/scaleway/dedibox/v1/types.py diff --git a/scaleway-async/scaleway_async/dedibox/__init__.py b/scaleway-async/scaleway_async/dedibox/__init__.py new file mode 100644 index 000000000..8b74a5ed7 --- /dev/null +++ b/scaleway-async/scaleway_async/dedibox/__init__.py @@ -0,0 +1,2 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. diff --git a/scaleway-async/scaleway_async/dedibox/v1/__init__.py b/scaleway-async/scaleway_async/dedibox/v1/__init__.py new file mode 100644 index 000000000..e6b1144a7 --- /dev/null +++ b/scaleway-async/scaleway_async/dedibox/v1/__init__.py @@ -0,0 +1,541 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+from .types import AttachFailoverIPToMacAddressRequestMacType +from .types import BMCAccessStatus +from .content import BMC_ACCESS_TRANSIENT_STATUSES +from .types import BackupStatus +from .types import FailoverBlockVersion +from .types import FailoverIPInterfaceType +from .types import FailoverIPStatus +from .types import FailoverIPVersion +from .types import GetRpnStatusResponseStatus +from .types import IPSemantic +from .types import IPStatus +from .types import IPVersion +from .types import IPv6BlockDelegationStatus +from .content import I_PV6_BLOCK_DELEGATION_TRANSIENT_STATUSES +from .types import InvoicePaymentMethod +from .types import InvoiceStatus +from .types import ListFailoverIPsRequestOrderBy +from .types import ListInvoicesRequestOrderBy +from .types import ListOSRequestOrderBy +from .types import ListOffersRequestOrderBy +from .types import ListRefundsRequestOrderBy +from .types import ListRpnCapableSanServersRequestOrderBy +from .types import ListRpnCapableServersRequestOrderBy +from .types import ListRpnGroupMembersRequestOrderBy +from .types import ListRpnGroupsRequestOrderBy +from .types import ListRpnInvitesRequestOrderBy +from .types import ListRpnSansRequestOrderBy +from .types import ListRpnServerCapabilitiesRequestOrderBy +from .types import ListRpnV2CapableResourcesRequestOrderBy +from .types import ListRpnV2GroupLogsRequestOrderBy +from .types import ListRpnV2GroupsRequestOrderBy +from .types import ListRpnV2MembersRequestOrderBy +from .types import ListRpnV2MembersRequestType +from .types import ListServerDisksRequestOrderBy +from .types import ListServerEventsRequestOrderBy +from .types import ListServersRequestOrderBy +from .types import ListServicesRequestOrderBy +from .types import LogAction +from .types import LogStatus +from .types import MemoryType +from .types import NetworkInterfaceInterfaceType +from .types import OSArch +from .types import OSType +from .types import OfferAntiDosInfoType +from .types import OfferCatalog +from .types import OfferPaymentFrequency +from .types import OfferSANInfoType +from .types import OfferServerInfoStock +from .types import PartitionFileSystem +from .types import PartitionType +from .types import RaidArrayRaidLevel +from .types import RefundMethod +from .types import RefundStatus +from .types import RescueProtocol +from .types import RpnGroupMemberStatus +from .content import RPN_GROUP_MEMBER_TRANSIENT_STATUSES +from .types import RpnGroupType +from .types import RpnSanIpType +from .types import RpnSanStatus +from .content import RPN_SAN_TRANSIENT_STATUSES +from .types import RpnV2GroupStatus +from .content import RPN_V2_GROUP_TRANSIENT_STATUSES +from .types import RpnV2GroupType +from .types import RpnV2MemberStatus +from .content import RPN_V2_MEMBER_TRANSIENT_STATUSES +from .types import ServerDiskType +from .types import ServerInstallStatus +from .content import SERVER_INSTALL_TRANSIENT_STATUSES +from .types import ServerStatus +from .content import SERVER_TRANSIENT_STATUSES +from .types import ServiceLevelLevel +from .types import ServiceProvisioningStatus +from .content import SERVICE_PROVISIONING_TRANSIENT_STATUSES +from .types import ServiceType +from .types import OfferAntiDosInfo +from .types import OfferBackupInfo +from .types import OfferBandwidthInfo +from .types import OfferLicenseInfo +from .types import OfferRPNInfo +from .types import OfferSANInfo +from .types import OfferStorageInfo +from .types import IP +from .types import Offer +from .types import NetworkInterface +from .types import OS +from 
.types import ServerLocation +from .types import ServerOption +from .types import ServiceLevel +from .types import RpnSan +from .types import RpnGroup +from .types import RpnV2GroupSubnet +from .types import Server +from .types import FailoverBlock +from .types import RpnSanIpRpnV2Group +from .types import RpnSanIpServer +from .types import RpnSanServer +from .types import RpnV2Group +from .types import RpnV2Member +from .types import ServerDisk +from .types import Service +from .types import GetIPv6BlockQuotasResponseQuota +from .types import InstallPartition +from .types import FailoverIP +from .types import ListIPv6BlockSubnetsAvailableResponseSubnet +from .types import InvoiceSummary +from .types import RpnSanIp +from .types import RefundSummary +from .types import RpnGroupMember +from .types import RpnSanSummary +from .types import RpnServerCapability +from .types import Log +from .types import ServerEvent +from .types import ServerSummary +from .types import CPU +from .types import Disk +from .types import Memory +from .types import PersistentMemory +from .types import RaidController +from .types import RaidArray +from .types import Partition +from .types import UpdatableRaidArray +from .types import AttachFailoverIPToMacAddressRequest +from .types import AttachFailoverIPsRequest +from .types import BMCAccess +from .types import Backup +from .types import BillingApiCanOrderRequest +from .types import BillingApiDownloadInvoiceRequest +from .types import BillingApiDownloadRefundRequest +from .types import BillingApiGetInvoiceRequest +from .types import BillingApiGetRefundRequest +from .types import BillingApiListInvoicesRequest +from .types import BillingApiListRefundsRequest +from .types import CanOrderResponse +from .types import CancelServerInstallRequest +from .types import CreateFailoverIPsRequest +from .types import CreateFailoverIPsResponse +from .types import CreateServerRequest +from .types import DeleteFailoverIPRequest +from .types import DeleteServerRequest +from .types import DeleteServiceRequest +from .types import DetachFailoverIPFromMacAddressRequest +from .types import DetachFailoverIPsRequest +from .types import GetBMCAccessRequest +from .types import GetFailoverIPRequest +from .types import GetIPv6BlockQuotasResponse +from .types import GetOSRequest +from .types import GetOfferRequest +from .types import GetOrderedServiceRequest +from .types import GetRaidRequest +from .types import GetRemainingQuotaRequest +from .types import GetRemainingQuotaResponse +from .types import GetRescueRequest +from .types import GetRpnStatusResponse +from .types import GetServerBackupRequest +from .types import GetServerDefaultPartitioningRequest +from .types import GetServerInstallRequest +from .types import GetServerRequest +from .types import GetServiceRequest +from .types import IPv6Block +from .types import IPv6BlockApiCreateIPv6BlockRequest +from .types import IPv6BlockApiCreateIPv6BlockSubnetRequest +from .types import IPv6BlockApiDeleteIPv6BlockRequest +from .types import IPv6BlockApiGetIPv6BlockQuotasRequest +from .types import IPv6BlockApiGetIPv6BlockRequest +from .types import IPv6BlockApiListIPv6BlockSubnetsAvailableRequest +from .types import IPv6BlockApiUpdateIPv6BlockRequest +from .types import InstallServerRequest +from .types import Invoice +from .types import ListFailoverIPsRequest +from .types import ListFailoverIPsResponse +from .types import ListIPv6BlockSubnetsAvailableResponse +from .types import ListInvoicesResponse +from .types import ListIpsResponse +from .types 
import ListOSRequest +from .types import ListOSResponse +from .types import ListOffersRequest +from .types import ListOffersResponse +from .types import ListRefundsResponse +from .types import ListRpnCapableSanServersResponse +from .types import ListRpnCapableServersResponse +from .types import ListRpnGroupMembersResponse +from .types import ListRpnGroupsResponse +from .types import ListRpnInvitesResponse +from .types import ListRpnSansResponse +from .types import ListRpnServerCapabilitiesResponse +from .types import ListRpnV2CapableResourcesResponse +from .types import ListRpnV2GroupLogsResponse +from .types import ListRpnV2GroupsResponse +from .types import ListRpnV2MembersResponse +from .types import ListServerDisksRequest +from .types import ListServerDisksResponse +from .types import ListServerEventsRequest +from .types import ListServerEventsResponse +from .types import ListServersRequest +from .types import ListServersResponse +from .types import ListServicesRequest +from .types import ListServicesResponse +from .types import ListSubscribableServerOptionsRequest +from .types import ListSubscribableServerOptionsResponse +from .types import OfferFailoverBlockInfo +from .types import OfferFailoverIpInfo +from .types import OfferServerInfo +from .types import OfferServiceLevelInfo +from .types import Raid +from .types import RebootServerRequest +from .types import Refund +from .types import Rescue +from .types import RpnApiGetRpnStatusRequest +from .types import RpnApiListRpnServerCapabilitiesRequest +from .types import RpnSanApiAddIpRequest +from .types import RpnSanApiCreateRpnSanRequest +from .types import RpnSanApiDeleteRpnSanRequest +from .types import RpnSanApiGetRpnSanRequest +from .types import RpnSanApiListAvailableIpsRequest +from .types import RpnSanApiListIpsRequest +from .types import RpnSanApiListRpnSansRequest +from .types import RpnSanApiRemoveIpRequest +from .types import RpnV1ApiAcceptRpnInviteRequest +from .types import RpnV1ApiAddRpnGroupMembersRequest +from .types import RpnV1ApiCreateRpnGroupRequest +from .types import RpnV1ApiDeleteRpnGroupMembersRequest +from .types import RpnV1ApiDeleteRpnGroupRequest +from .types import RpnV1ApiGetRpnGroupRequest +from .types import RpnV1ApiLeaveRpnGroupRequest +from .types import RpnV1ApiListRpnCapableSanServersRequest +from .types import RpnV1ApiListRpnCapableServersRequest +from .types import RpnV1ApiListRpnGroupMembersRequest +from .types import RpnV1ApiListRpnGroupsRequest +from .types import RpnV1ApiListRpnInvitesRequest +from .types import RpnV1ApiRefuseRpnInviteRequest +from .types import RpnV1ApiRpnGroupInviteRequest +from .types import RpnV1ApiUpdateRpnGroupNameRequest +from .types import RpnV2ApiAddRpnV2MembersRequest +from .types import RpnV2ApiCreateRpnV2GroupRequest +from .types import RpnV2ApiDeleteRpnV2GroupRequest +from .types import RpnV2ApiDeleteRpnV2MembersRequest +from .types import RpnV2ApiDisableRpnV2GroupCompatibilityRequest +from .types import RpnV2ApiEnableRpnV2GroupCompatibilityRequest +from .types import RpnV2ApiGetRpnV2GroupRequest +from .types import RpnV2ApiListRpnV2CapableResourcesRequest +from .types import RpnV2ApiListRpnV2GroupLogsRequest +from .types import RpnV2ApiListRpnV2GroupsRequest +from .types import RpnV2ApiListRpnV2MembersRequest +from .types import RpnV2ApiUpdateRpnV2GroupNameRequest +from .types import RpnV2ApiUpdateRpnV2VlanForMembersRequest +from .types import ServerDefaultPartitioning +from .types import ServerInstall +from .types import StartBMCAccessRequest +from .types import 
StartRescueRequest +from .types import StartServerRequest +from .types import StopBMCAccessRequest +from .types import StopRescueRequest +from .types import StopServerRequest +from .types import SubscribeServerOptionRequest +from .types import SubscribeStorageOptionsRequest +from .types import SubscribeStorageOptionsResponse +from .types import UpdateRaidRequest +from .types import UpdateReverseRequest +from .types import UpdateServerBackupRequest +from .types import UpdateServerRequest +from .types import UpdateServerTagsRequest +from .api import DediboxV1API +from .api import DediboxV1BillingAPI +from .api import DediboxV1IPv6BlockAPI +from .api import DediboxV1RpnAPI +from .api import DediboxV1RpnSanAPI +from .api import DediboxV1RpnV1API +from .api import DediboxV1RpnV2API + +__all__ = [ + "AttachFailoverIPToMacAddressRequestMacType", + "BMCAccessStatus", + "BMC_ACCESS_TRANSIENT_STATUSES", + "BackupStatus", + "FailoverBlockVersion", + "FailoverIPInterfaceType", + "FailoverIPStatus", + "FailoverIPVersion", + "GetRpnStatusResponseStatus", + "IPSemantic", + "IPStatus", + "IPVersion", + "IPv6BlockDelegationStatus", + "I_PV6_BLOCK_DELEGATION_TRANSIENT_STATUSES", + "InvoicePaymentMethod", + "InvoiceStatus", + "ListFailoverIPsRequestOrderBy", + "ListInvoicesRequestOrderBy", + "ListOSRequestOrderBy", + "ListOffersRequestOrderBy", + "ListRefundsRequestOrderBy", + "ListRpnCapableSanServersRequestOrderBy", + "ListRpnCapableServersRequestOrderBy", + "ListRpnGroupMembersRequestOrderBy", + "ListRpnGroupsRequestOrderBy", + "ListRpnInvitesRequestOrderBy", + "ListRpnSansRequestOrderBy", + "ListRpnServerCapabilitiesRequestOrderBy", + "ListRpnV2CapableResourcesRequestOrderBy", + "ListRpnV2GroupLogsRequestOrderBy", + "ListRpnV2GroupsRequestOrderBy", + "ListRpnV2MembersRequestOrderBy", + "ListRpnV2MembersRequestType", + "ListServerDisksRequestOrderBy", + "ListServerEventsRequestOrderBy", + "ListServersRequestOrderBy", + "ListServicesRequestOrderBy", + "LogAction", + "LogStatus", + "MemoryType", + "NetworkInterfaceInterfaceType", + "OSArch", + "OSType", + "OfferAntiDosInfoType", + "OfferCatalog", + "OfferPaymentFrequency", + "OfferSANInfoType", + "OfferServerInfoStock", + "PartitionFileSystem", + "PartitionType", + "RaidArrayRaidLevel", + "RefundMethod", + "RefundStatus", + "RescueProtocol", + "RpnGroupMemberStatus", + "RPN_GROUP_MEMBER_TRANSIENT_STATUSES", + "RpnGroupType", + "RpnSanIpType", + "RpnSanStatus", + "RPN_SAN_TRANSIENT_STATUSES", + "RpnV2GroupStatus", + "RPN_V2_GROUP_TRANSIENT_STATUSES", + "RpnV2GroupType", + "RpnV2MemberStatus", + "RPN_V2_MEMBER_TRANSIENT_STATUSES", + "ServerDiskType", + "ServerInstallStatus", + "SERVER_INSTALL_TRANSIENT_STATUSES", + "ServerStatus", + "SERVER_TRANSIENT_STATUSES", + "ServiceLevelLevel", + "ServiceProvisioningStatus", + "SERVICE_PROVISIONING_TRANSIENT_STATUSES", + "ServiceType", + "OfferAntiDosInfo", + "OfferBackupInfo", + "OfferBandwidthInfo", + "OfferLicenseInfo", + "OfferRPNInfo", + "OfferSANInfo", + "OfferStorageInfo", + "IP", + "Offer", + "NetworkInterface", + "OS", + "ServerLocation", + "ServerOption", + "ServiceLevel", + "RpnSan", + "RpnGroup", + "RpnV2GroupSubnet", + "Server", + "FailoverBlock", + "RpnSanIpRpnV2Group", + "RpnSanIpServer", + "RpnSanServer", + "RpnV2Group", + "RpnV2Member", + "ServerDisk", + "Service", + "GetIPv6BlockQuotasResponseQuota", + "InstallPartition", + "FailoverIP", + "ListIPv6BlockSubnetsAvailableResponseSubnet", + "InvoiceSummary", + "RpnSanIp", + "RefundSummary", + "RpnGroupMember", + "RpnSanSummary", + "RpnServerCapability", + 
"Log", + "ServerEvent", + "ServerSummary", + "CPU", + "Disk", + "Memory", + "PersistentMemory", + "RaidController", + "RaidArray", + "Partition", + "UpdatableRaidArray", + "AttachFailoverIPToMacAddressRequest", + "AttachFailoverIPsRequest", + "BMCAccess", + "Backup", + "BillingApiCanOrderRequest", + "BillingApiDownloadInvoiceRequest", + "BillingApiDownloadRefundRequest", + "BillingApiGetInvoiceRequest", + "BillingApiGetRefundRequest", + "BillingApiListInvoicesRequest", + "BillingApiListRefundsRequest", + "CanOrderResponse", + "CancelServerInstallRequest", + "CreateFailoverIPsRequest", + "CreateFailoverIPsResponse", + "CreateServerRequest", + "DeleteFailoverIPRequest", + "DeleteServerRequest", + "DeleteServiceRequest", + "DetachFailoverIPFromMacAddressRequest", + "DetachFailoverIPsRequest", + "GetBMCAccessRequest", + "GetFailoverIPRequest", + "GetIPv6BlockQuotasResponse", + "GetOSRequest", + "GetOfferRequest", + "GetOrderedServiceRequest", + "GetRaidRequest", + "GetRemainingQuotaRequest", + "GetRemainingQuotaResponse", + "GetRescueRequest", + "GetRpnStatusResponse", + "GetServerBackupRequest", + "GetServerDefaultPartitioningRequest", + "GetServerInstallRequest", + "GetServerRequest", + "GetServiceRequest", + "IPv6Block", + "IPv6BlockApiCreateIPv6BlockRequest", + "IPv6BlockApiCreateIPv6BlockSubnetRequest", + "IPv6BlockApiDeleteIPv6BlockRequest", + "IPv6BlockApiGetIPv6BlockQuotasRequest", + "IPv6BlockApiGetIPv6BlockRequest", + "IPv6BlockApiListIPv6BlockSubnetsAvailableRequest", + "IPv6BlockApiUpdateIPv6BlockRequest", + "InstallServerRequest", + "Invoice", + "ListFailoverIPsRequest", + "ListFailoverIPsResponse", + "ListIPv6BlockSubnetsAvailableResponse", + "ListInvoicesResponse", + "ListIpsResponse", + "ListOSRequest", + "ListOSResponse", + "ListOffersRequest", + "ListOffersResponse", + "ListRefundsResponse", + "ListRpnCapableSanServersResponse", + "ListRpnCapableServersResponse", + "ListRpnGroupMembersResponse", + "ListRpnGroupsResponse", + "ListRpnInvitesResponse", + "ListRpnSansResponse", + "ListRpnServerCapabilitiesResponse", + "ListRpnV2CapableResourcesResponse", + "ListRpnV2GroupLogsResponse", + "ListRpnV2GroupsResponse", + "ListRpnV2MembersResponse", + "ListServerDisksRequest", + "ListServerDisksResponse", + "ListServerEventsRequest", + "ListServerEventsResponse", + "ListServersRequest", + "ListServersResponse", + "ListServicesRequest", + "ListServicesResponse", + "ListSubscribableServerOptionsRequest", + "ListSubscribableServerOptionsResponse", + "OfferFailoverBlockInfo", + "OfferFailoverIpInfo", + "OfferServerInfo", + "OfferServiceLevelInfo", + "Raid", + "RebootServerRequest", + "Refund", + "Rescue", + "RpnApiGetRpnStatusRequest", + "RpnApiListRpnServerCapabilitiesRequest", + "RpnSanApiAddIpRequest", + "RpnSanApiCreateRpnSanRequest", + "RpnSanApiDeleteRpnSanRequest", + "RpnSanApiGetRpnSanRequest", + "RpnSanApiListAvailableIpsRequest", + "RpnSanApiListIpsRequest", + "RpnSanApiListRpnSansRequest", + "RpnSanApiRemoveIpRequest", + "RpnV1ApiAcceptRpnInviteRequest", + "RpnV1ApiAddRpnGroupMembersRequest", + "RpnV1ApiCreateRpnGroupRequest", + "RpnV1ApiDeleteRpnGroupMembersRequest", + "RpnV1ApiDeleteRpnGroupRequest", + "RpnV1ApiGetRpnGroupRequest", + "RpnV1ApiLeaveRpnGroupRequest", + "RpnV1ApiListRpnCapableSanServersRequest", + "RpnV1ApiListRpnCapableServersRequest", + "RpnV1ApiListRpnGroupMembersRequest", + "RpnV1ApiListRpnGroupsRequest", + "RpnV1ApiListRpnInvitesRequest", + "RpnV1ApiRefuseRpnInviteRequest", + "RpnV1ApiRpnGroupInviteRequest", + "RpnV1ApiUpdateRpnGroupNameRequest", + 
"RpnV2ApiAddRpnV2MembersRequest", + "RpnV2ApiCreateRpnV2GroupRequest", + "RpnV2ApiDeleteRpnV2GroupRequest", + "RpnV2ApiDeleteRpnV2MembersRequest", + "RpnV2ApiDisableRpnV2GroupCompatibilityRequest", + "RpnV2ApiEnableRpnV2GroupCompatibilityRequest", + "RpnV2ApiGetRpnV2GroupRequest", + "RpnV2ApiListRpnV2CapableResourcesRequest", + "RpnV2ApiListRpnV2GroupLogsRequest", + "RpnV2ApiListRpnV2GroupsRequest", + "RpnV2ApiListRpnV2MembersRequest", + "RpnV2ApiUpdateRpnV2GroupNameRequest", + "RpnV2ApiUpdateRpnV2VlanForMembersRequest", + "ServerDefaultPartitioning", + "ServerInstall", + "StartBMCAccessRequest", + "StartRescueRequest", + "StartServerRequest", + "StopBMCAccessRequest", + "StopRescueRequest", + "StopServerRequest", + "SubscribeServerOptionRequest", + "SubscribeStorageOptionsRequest", + "SubscribeStorageOptionsResponse", + "UpdateRaidRequest", + "UpdateReverseRequest", + "UpdateServerBackupRequest", + "UpdateServerRequest", + "UpdateServerTagsRequest", + "DediboxV1API", + "DediboxV1BillingAPI", + "DediboxV1IPv6BlockAPI", + "DediboxV1RpnAPI", + "DediboxV1RpnSanAPI", + "DediboxV1RpnV1API", + "DediboxV1RpnV2API", +] diff --git a/scaleway-async/scaleway_async/dedibox/v1/api.py b/scaleway-async/scaleway_async/dedibox/v1/api.py new file mode 100644 index 000000000..a5371fed0 --- /dev/null +++ b/scaleway-async/scaleway_async/dedibox/v1/api.py @@ -0,0 +1,4727 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. + +from typing import Awaitable, List, Optional, Union + +from scaleway_core.api import API +from scaleway_core.bridge import ( + ScwFile, + Zone, + unmarshal_ScwFile, +) +from scaleway_core.utils import ( + WaitForOptions, + validate_path_param, + fetch_all_pages_async, + wait_for_resource_async, +) +from .types import ( + AttachFailoverIPToMacAddressRequestMacType, + ListFailoverIPsRequestOrderBy, + ListInvoicesRequestOrderBy, + ListOSRequestOrderBy, + ListOffersRequestOrderBy, + ListRefundsRequestOrderBy, + ListRpnCapableSanServersRequestOrderBy, + ListRpnCapableServersRequestOrderBy, + ListRpnGroupMembersRequestOrderBy, + ListRpnGroupsRequestOrderBy, + ListRpnInvitesRequestOrderBy, + ListRpnSansRequestOrderBy, + ListRpnServerCapabilitiesRequestOrderBy, + ListRpnV2CapableResourcesRequestOrderBy, + ListRpnV2GroupLogsRequestOrderBy, + ListRpnV2GroupsRequestOrderBy, + ListRpnV2MembersRequestOrderBy, + ListRpnV2MembersRequestType, + ListServerDisksRequestOrderBy, + ListServerEventsRequestOrderBy, + ListServersRequestOrderBy, + ListServicesRequestOrderBy, + OSType, + OfferCatalog, + RpnSanIpType, + RpnV2GroupType, + AttachFailoverIPToMacAddressRequest, + AttachFailoverIPsRequest, + BMCAccess, + Backup, + CanOrderResponse, + CreateFailoverIPsRequest, + CreateFailoverIPsResponse, + CreateServerRequest, + DetachFailoverIPsRequest, + FailoverIP, + GetIPv6BlockQuotasResponse, + GetRemainingQuotaResponse, + GetRpnStatusResponse, + IP, + IPv6Block, + IPv6BlockApiCreateIPv6BlockRequest, + IPv6BlockApiCreateIPv6BlockSubnetRequest, + IPv6BlockApiUpdateIPv6BlockRequest, + InstallPartition, + InstallServerRequest, + Invoice, + InvoiceSummary, + ListFailoverIPsResponse, + ListIPv6BlockSubnetsAvailableResponse, + ListInvoicesResponse, + ListIpsResponse, + ListOSResponse, + ListOffersResponse, + ListRefundsResponse, + ListRpnCapableSanServersResponse, + ListRpnCapableServersResponse, + ListRpnGroupMembersResponse, + ListRpnGroupsResponse, + ListRpnInvitesResponse, + ListRpnSansResponse, + ListRpnServerCapabilitiesResponse, + 
ListRpnV2CapableResourcesResponse, + ListRpnV2GroupLogsResponse, + ListRpnV2GroupsResponse, + ListRpnV2MembersResponse, + ListServerDisksResponse, + ListServerEventsResponse, + ListServersResponse, + ListServicesResponse, + ListSubscribableServerOptionsResponse, + Log, + OS, + Offer, + Raid, + Refund, + RefundSummary, + Rescue, + RpnGroup, + RpnGroupMember, + RpnSan, + RpnSanApiAddIpRequest, + RpnSanApiCreateRpnSanRequest, + RpnSanApiRemoveIpRequest, + RpnSanServer, + RpnSanSummary, + RpnServerCapability, + RpnV1ApiAddRpnGroupMembersRequest, + RpnV1ApiCreateRpnGroupRequest, + RpnV1ApiDeleteRpnGroupMembersRequest, + RpnV1ApiLeaveRpnGroupRequest, + RpnV1ApiRpnGroupInviteRequest, + RpnV1ApiUpdateRpnGroupNameRequest, + RpnV2ApiAddRpnV2MembersRequest, + RpnV2ApiCreateRpnV2GroupRequest, + RpnV2ApiDeleteRpnV2MembersRequest, + RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + RpnV2ApiUpdateRpnV2GroupNameRequest, + RpnV2ApiUpdateRpnV2VlanForMembersRequest, + RpnV2Group, + RpnV2Member, + Server, + ServerDefaultPartitioning, + ServerDisk, + ServerEvent, + ServerInstall, + ServerSummary, + Service, + StartBMCAccessRequest, + StartRescueRequest, + SubscribeServerOptionRequest, + SubscribeStorageOptionsRequest, + SubscribeStorageOptionsResponse, + UpdatableRaidArray, + UpdateRaidRequest, + UpdateReverseRequest, + UpdateServerBackupRequest, + UpdateServerRequest, + UpdateServerTagsRequest, +) +from .content import ( + BMC_ACCESS_TRANSIENT_STATUSES, + RPN_SAN_TRANSIENT_STATUSES, + RPN_V2_GROUP_TRANSIENT_STATUSES, + SERVER_INSTALL_TRANSIENT_STATUSES, + SERVER_TRANSIENT_STATUSES, +) +from .marshalling import ( + unmarshal_IP, + unmarshal_Offer, + unmarshal_OS, + unmarshal_RpnSan, + unmarshal_RpnGroup, + unmarshal_Server, + unmarshal_RpnV2Group, + unmarshal_Service, + unmarshal_FailoverIP, + unmarshal_BMCAccess, + unmarshal_Backup, + unmarshal_CanOrderResponse, + unmarshal_CreateFailoverIPsResponse, + unmarshal_GetIPv6BlockQuotasResponse, + unmarshal_GetRemainingQuotaResponse, + unmarshal_GetRpnStatusResponse, + unmarshal_IPv6Block, + unmarshal_Invoice, + unmarshal_ListFailoverIPsResponse, + unmarshal_ListIPv6BlockSubnetsAvailableResponse, + unmarshal_ListInvoicesResponse, + unmarshal_ListIpsResponse, + unmarshal_ListOSResponse, + unmarshal_ListOffersResponse, + unmarshal_ListRefundsResponse, + unmarshal_ListRpnCapableSanServersResponse, + unmarshal_ListRpnCapableServersResponse, + unmarshal_ListRpnGroupMembersResponse, + unmarshal_ListRpnGroupsResponse, + unmarshal_ListRpnInvitesResponse, + unmarshal_ListRpnSansResponse, + unmarshal_ListRpnServerCapabilitiesResponse, + unmarshal_ListRpnV2CapableResourcesResponse, + unmarshal_ListRpnV2GroupLogsResponse, + unmarshal_ListRpnV2GroupsResponse, + unmarshal_ListRpnV2MembersResponse, + unmarshal_ListServerDisksResponse, + unmarshal_ListServerEventsResponse, + unmarshal_ListServersResponse, + unmarshal_ListServicesResponse, + unmarshal_ListSubscribableServerOptionsResponse, + unmarshal_Raid, + unmarshal_Refund, + unmarshal_Rescue, + unmarshal_ServerDefaultPartitioning, + unmarshal_ServerInstall, + unmarshal_SubscribeStorageOptionsResponse, + marshal_AttachFailoverIPToMacAddressRequest, + marshal_AttachFailoverIPsRequest, + marshal_CreateFailoverIPsRequest, + marshal_CreateServerRequest, + marshal_DetachFailoverIPsRequest, + marshal_IPv6BlockApiCreateIPv6BlockRequest, + marshal_IPv6BlockApiCreateIPv6BlockSubnetRequest, + marshal_IPv6BlockApiUpdateIPv6BlockRequest, + marshal_InstallServerRequest, + marshal_RpnSanApiAddIpRequest, + marshal_RpnSanApiCreateRpnSanRequest, + 
marshal_RpnSanApiRemoveIpRequest, + marshal_RpnV1ApiAddRpnGroupMembersRequest, + marshal_RpnV1ApiCreateRpnGroupRequest, + marshal_RpnV1ApiDeleteRpnGroupMembersRequest, + marshal_RpnV1ApiLeaveRpnGroupRequest, + marshal_RpnV1ApiRpnGroupInviteRequest, + marshal_RpnV1ApiUpdateRpnGroupNameRequest, + marshal_RpnV2ApiAddRpnV2MembersRequest, + marshal_RpnV2ApiCreateRpnV2GroupRequest, + marshal_RpnV2ApiDeleteRpnV2MembersRequest, + marshal_RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + marshal_RpnV2ApiUpdateRpnV2GroupNameRequest, + marshal_RpnV2ApiUpdateRpnV2VlanForMembersRequest, + marshal_StartBMCAccessRequest, + marshal_StartRescueRequest, + marshal_SubscribeServerOptionRequest, + marshal_SubscribeStorageOptionsRequest, + marshal_UpdateRaidRequest, + marshal_UpdateReverseRequest, + marshal_UpdateServerBackupRequest, + marshal_UpdateServerRequest, + marshal_UpdateServerTagsRequest, +) + + +class DediboxV1API(API): + """ + Dedibox Phoenix API. + """ + + async def list_servers( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServersRequestOrderBy] = None, + project_id: Optional[str] = None, + search: Optional[str] = None, + ) -> ListServersResponse: + """ + List baremetal servers for project. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server per page. + :param order_by: Order of the servers. + :param project_id: Filter servers by project ID. + :param search: Filter servers by hostname. + :return: :class:`ListServersResponse ` + + Usage: + :: + + result = await api.list_servers() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "search": search, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServersResponse(res.json()) + + async def list_servers_all( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServersRequestOrderBy] = None, + project_id: Optional[str] = None, + search: Optional[str] = None, + ) -> List[ServerSummary]: + """ + List baremetal servers for project. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server per page. + :param order_by: Order of the servers. + :param project_id: Filter servers by project ID. + :param search: Filter servers by hostname. + :return: :class:`List[ServerSummary] ` + + Usage: + :: + + result = await api.list_servers_all() + """ + + return await fetch_all_pages_async( + type=ListServersResponse, + key="servers", + fetcher=self.list_servers, + args={ + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + "search": search, + }, + ) + + async def get_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Server: + """ + Get a specific baremetal server. + Get the server associated with the given ID. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ :return: :class:`Server ` + + Usage: + :: + + result = await api.get_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}", + ) + + self._throw_on_error(res) + return unmarshal_Server(res.json()) + + async def wait_for_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + options: Optional[WaitForOptions[Server, Union[bool, Awaitable[bool]]]] = None, + ) -> Server: + """ + Get a specific baremetal server. + Get the server associated with the given ID. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Server ` + + Usage: + :: + + result = await api.get_server( + server_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in SERVER_TRANSIENT_STATUSES + + return await wait_for_resource_async( + fetcher=self.get_server, + options=options, + args={ + "server_id": server_id, + "zone": zone, + }, + ) + + async def get_server_backup( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Backup: + """ + :param server_id: Server ID of the backup. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Backup ` + + Usage: + :: + + result = await api.get_server_backup( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/backups", + ) + + self._throw_on_error(res) + return unmarshal_Backup(res.json()) + + async def update_server_backup( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + password: Optional[str] = None, + autologin: Optional[bool] = None, + acl_enabled: Optional[bool] = None, + ) -> Backup: + """ + :param server_id: Server ID to update backup. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param password: Password of the server backup. + :param autologin: Autologin of the server backup. + :param acl_enabled: Boolean to enable or disable ACL. + :return: :class:`Backup ` + + Usage: + :: + + result = await api.update_server_backup( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/backups", + body=marshal_UpdateServerBackupRequest( + UpdateServerBackupRequest( + server_id=server_id, + zone=zone, + password=password, + autologin=autologin, + acl_enabled=acl_enabled, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Backup(res.json()) + + async def list_subscribable_server_options( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListSubscribableServerOptionsResponse: + """ + List subscribable server options. + List subscribable options associated to the given server ID. + :param server_id: Server ID of the subscribable server options. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ :param page: Page number. + :param page_size: Number of subscribable server option per page. + :return: :class:`ListSubscribableServerOptionsResponse ` + + Usage: + :: + + result = await api.list_subscribable_server_options( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/subscribable-server-options", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListSubscribableServerOptionsResponse(res.json()) + + async def list_subscribable_server_options_all( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> List[Offer]: + """ + List subscribable server options. + List subscribable options associated to the given server ID. + :param server_id: Server ID of the subscribable server options. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of subscribable server option per page. + :return: :class:`List[Offer] ` + + Usage: + :: + + result = await api.list_subscribable_server_options_all( + server_id=1, + ) + """ + + return await fetch_all_pages_async( + type=ListSubscribableServerOptionsResponse, + key="server_options", + fetcher=self.list_subscribable_server_options, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + }, + ) + + async def subscribe_server_option( + self, + *, + server_id: int, + option_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + Subscribe server option. + Subscribe option for the given server ID. + :param server_id: Server ID to subscribe server option. + :param option_id: Option ID to subscribe. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Service ` + + Usage: + :: + + result = await api.subscribe_server_option( + server_id=1, + option_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/subscribe-server-option", + body=marshal_SubscribeServerOptionRequest( + SubscribeServerOptionRequest( + server_id=server_id, + option_id=option_id, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + async def create_server( + self, + *, + offer_id: int, + server_option_ids: List[int], + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + datacenter_name: Optional[str] = None, + ) -> Service: + """ + Create a baremetal server. + Create a new baremetal server. The order return you a service ID to follow the provisionning status you could call GetService. + :param offer_id: Offer ID of the new server. + :param server_option_ids: Server option IDs of the new server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID of the new server. + :param datacenter_name: Datacenter name of the new server. 
+ :return: :class:`Service ` + + Usage: + :: + + result = await api.create_server( + offer_id=1, + server_option_ids=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers", + body=marshal_CreateServerRequest( + CreateServerRequest( + offer_id=offer_id, + server_option_ids=server_option_ids, + zone=zone, + project_id=project_id, + datacenter_name=datacenter_name, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + async def subscribe_storage_options( + self, + *, + server_id: int, + options_ids: List[int], + zone: Optional[Zone] = None, + ) -> SubscribeStorageOptionsResponse: + """ + Subscribe storage server option. + Subscribe storage option for the given server ID. + :param server_id: Server ID of the storage options to subscribe. + :param options_ids: Option IDs of the storage options to subscribe. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`SubscribeStorageOptionsResponse ` + + Usage: + :: + + result = await api.subscribe_storage_options( + server_id=1, + options_ids=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/subscribe-storage-options", + body=marshal_SubscribeStorageOptionsRequest( + SubscribeStorageOptionsRequest( + server_id=server_id, + options_ids=options_ids, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_SubscribeStorageOptionsResponse(res.json()) + + async def update_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + hostname: Optional[str] = None, + enable_ipv6: Optional[bool] = None, + ) -> Server: + """ + Update a baremetal server. + Update the server associated with the given ID. + :param server_id: Server ID to update. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param hostname: Hostname of the server to update. + :param enable_ipv6: Flag to enable or not the IPv6 of server. + :return: :class:`Server ` + + Usage: + :: + + result = await api.update_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}", + body=marshal_UpdateServerRequest( + UpdateServerRequest( + server_id=server_id, + zone=zone, + hostname=hostname, + enable_ipv6=enable_ipv6, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Server(res.json()) + + async def update_server_tags( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + tags: Optional[List[str]] = None, + ) -> Server: + """ + :param server_id: Server ID to update the tags. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param tags: Tags of server to update. 
+ :return: :class:`Server ` + + Usage: + :: + + result = await api.update_server_tags( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/tags", + body=marshal_UpdateServerTagsRequest( + UpdateServerTagsRequest( + server_id=server_id, + zone=zone, + tags=tags, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Server(res.json()) + + async def reboot_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Reboot a baremetal server. + Reboot the server associated with the given ID, use boot param to reboot in rescue. + :param server_id: Server ID to reboot. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.reboot_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/reboot", + body={}, + ) + + self._throw_on_error(res) + + async def start_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Start a baremetal server. + Start the server associated with the given ID. + :param server_id: Server ID to start. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.start_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/start", + body={}, + ) + + self._throw_on_error(res) + + async def stop_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Stop a baremetal server. + Stop the server associated with the given ID. + :param server_id: Server ID to stop. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.stop_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/stop", + body={}, + ) + + self._throw_on_error(res) + + async def delete_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Delete a baremetal server. + Delete the server associated with the given ID. + :param server_id: Server ID to delete. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ + Usage: + :: + + result = await api.delete_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}", + ) + + self._throw_on_error(res) + + async def list_server_events( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerEventsRequestOrderBy] = None, + ) -> ListServerEventsResponse: + """ + List server events. + List events associated to the given server ID. + :param server_id: Server ID of the server events. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server event per page. + :param order_by: Order of the server events. + :return: :class:`ListServerEventsResponse ` + + Usage: + :: + + result = await api.list_server_events( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/events", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServerEventsResponse(res.json()) + + async def list_server_events_all( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerEventsRequestOrderBy] = None, + ) -> List[ServerEvent]: + """ + List server events. + List events associated to the given server ID. + :param server_id: Server ID of the server events. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server event per page. + :param order_by: Order of the server events. + :return: :class:`List[ServerEvent] ` + + Usage: + :: + + result = await api.list_server_events_all( + server_id=1, + ) + """ + + return await fetch_all_pages_async( + type=ListServerEventsResponse, + key="events", + fetcher=self.list_server_events, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + }, + ) + + async def list_server_disks( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerDisksRequestOrderBy] = None, + ) -> ListServerDisksResponse: + """ + List server disks. + List disks associated to the given server ID. + :param server_id: Server ID of the server disks. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server disk per page. + :param order_by: Order of the server disks. 
+ :return: :class:`ListServerDisksResponse ` + + Usage: + :: + + result = await api.list_server_disks( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/disks", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServerDisksResponse(res.json()) + + async def list_server_disks_all( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerDisksRequestOrderBy] = None, + ) -> List[ServerDisk]: + """ + List server disks. + List disks associated to the given server ID. + :param server_id: Server ID of the server disks. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server disk per page. + :param order_by: Order of the server disks. + :return: :class:`List[ServerDisk] ` + + Usage: + :: + + result = await api.list_server_disks_all( + server_id=1, + ) + """ + + return await fetch_all_pages_async( + type=ListServerDisksResponse, + key="disks", + fetcher=self.list_server_disks, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + }, + ) + + async def get_ordered_service( + self, + *, + ordered_service_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + :param ordered_service_id: + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Service ` + + Usage: + :: + + result = await api.get_ordered_service( + ordered_service_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ordered_service_id = validate_path_param( + "ordered_service_id", ordered_service_id + ) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/ordered-services/{param_ordered_service_id}", + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + async def get_service( + self, + *, + service_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + Get a specific service. + Get the service associated with the given ID. + :param service_id: ID of the service. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Service ` + + Usage: + :: + + result = await api.get_service( + service_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_service_id = validate_path_param("service_id", service_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/services/{param_service_id}", + body={}, + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + async def delete_service( + self, + *, + service_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + Delete a specific service. + Delete the service associated with the given ID. + :param service_id: ID of the service. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ :return: :class:`Service ` + + Usage: + :: + + result = await api.delete_service( + service_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_service_id = validate_path_param("service_id", service_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/services/{param_service_id}", + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + async def list_services( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListServicesResponse: + """ + List services. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of service per page. + :param order_by: Order of the services. + :param project_id: Project ID. + :return: :class:`ListServicesResponse ` + + Usage: + :: + + result = await api.list_services() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/services", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServicesResponse(res.json()) + + async def list_services_all( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[Service]: + """ + List services. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of service per page. + :param order_by: Order of the services. + :param project_id: Project ID. + :return: :class:`List[Service] ` + + Usage: + :: + + result = await api.list_services_all() + """ + + return await fetch_all_pages_async( + type=ListServicesResponse, + key="services", + fetcher=self.list_services, + args={ + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def install_server( + self, + *, + server_id: int, + os_id: int, + hostname: str, + zone: Optional[Zone] = None, + user_login: Optional[str] = None, + user_password: Optional[str] = None, + panel_password: Optional[str] = None, + root_password: Optional[str] = None, + partitions: Optional[List[InstallPartition]] = None, + ssh_key_ids: Optional[List[str]] = None, + license_offer_id: Optional[int] = None, + ip_id: Optional[int] = None, + ) -> ServerInstall: + """ + Install a baremetal server. + Install an OS on the server associated with the given ID. + :param server_id: Server ID to install. + :param os_id: OS ID to install on the server. + :param hostname: Hostname of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param user_login: User to install on the server. + :param user_password: User password to install on the server. + :param panel_password: Panel password to install on the server. + :param root_password: Root password to install on the server. + :param partitions: Partitions to install on the server. + :param ssh_key_ids: SSH key IDs authorized on the server. 
+ :param license_offer_id: Offer ID of license to install on server. + :param ip_id: IP to link at the license to install on server. + :return: :class:`ServerInstall ` + + Usage: + :: + + result = await api.install_server( + server_id=1, + os_id=1, + hostname="example", + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/install", + body=marshal_InstallServerRequest( + InstallServerRequest( + server_id=server_id, + os_id=os_id, + hostname=hostname, + zone=zone, + user_login=user_login, + user_password=user_password, + panel_password=panel_password, + root_password=root_password, + partitions=partitions, + ssh_key_ids=ssh_key_ids, + license_offer_id=license_offer_id, + ip_id=ip_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_ServerInstall(res.json()) + + async def get_server_install( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> ServerInstall: + """ + Get a specific server installation status. + Get the server installation status associated with the given server ID. + :param server_id: Server ID of the server to install. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`ServerInstall ` + + Usage: + :: + + result = await api.get_server_install( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/install", + ) + + self._throw_on_error(res) + return unmarshal_ServerInstall(res.json()) + + async def wait_for_server_install( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + options: Optional[ + WaitForOptions[ServerInstall, Union[bool, Awaitable[bool]]] + ] = None, + ) -> ServerInstall: + """ + Get a specific server installation status. + Get the server installation status associated with the given server ID. + :param server_id: Server ID of the server to install. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`ServerInstall ` + + Usage: + :: + + result = await api.get_server_install( + server_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = ( + lambda res: res.status not in SERVER_INSTALL_TRANSIENT_STATUSES + ) + + return await wait_for_resource_async( + fetcher=self.get_server_install, + options=options, + args={ + "server_id": server_id, + "zone": zone, + }, + ) + + async def cancel_server_install( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Cancels the current (running) server installation. + Cancels the current server installation associated with the given server ID. + :param server_id: Server ID of the server to cancel install. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ + Usage: + :: + + result = await api.cancel_server_install( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/cancel-install", + ) + + self._throw_on_error(res) + + async def get_server_default_partitioning( + self, + *, + server_id: int, + os_id: int, + zone: Optional[Zone] = None, + ) -> ServerDefaultPartitioning: + """ + Get server default partitioning. + Get the server default partitioning schema associated with the given server ID and OS ID. + :param server_id: ID of the server. + :param os_id: OS ID of the default partitioning. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`ServerDefaultPartitioning ` + + Usage: + :: + + result = await api.get_server_default_partitioning( + server_id=1, + os_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + param_os_id = validate_path_param("os_id", os_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/partitioning/{param_os_id}", + ) + + self._throw_on_error(res) + return unmarshal_ServerDefaultPartitioning(res.json()) + + async def start_bmc_access( + self, + *, + server_id: int, + ip: str, + zone: Optional[Zone] = None, + ) -> None: + """ + Start BMC (Baseboard Management Controller) access for a given baremetal server. + Start BMC (Baseboard Management Controller) access associated with the given ID. + The BMC (Baseboard Management Controller) access is available one hour after the installation of the server. + :param server_id: ID of the server to start the BMC access. + :param ip: The IP authorized to connect to the given server. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.start_bmc_access( + server_id=1, + ip="example", + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/bmc-access", + body=marshal_StartBMCAccessRequest( + StartBMCAccessRequest( + server_id=server_id, + ip=ip, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def get_bmc_access( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> BMCAccess: + """ + Get BMC (Baseboard Management Controller) access for a given baremetal server. + Get the BMC (Baseboard Management Controller) access associated with the given ID. + :param server_id: ID of the server to get BMC access. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ :return: :class:`BMCAccess ` + + Usage: + :: + + result = await api.get_bmc_access( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/bmc-access", + ) + + self._throw_on_error(res) + return unmarshal_BMCAccess(res.json()) + + async def wait_for_bmc_access( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + options: Optional[ + WaitForOptions[BMCAccess, Union[bool, Awaitable[bool]]] + ] = None, + ) -> BMCAccess: + """ + Get BMC (Baseboard Management Controller) access for a given baremetal server. + Get the BMC (Baseboard Management Controller) access associated with the given ID. + :param server_id: ID of the server to get BMC access. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`BMCAccess ` + + Usage: + :: + + result = await api.get_bmc_access( + server_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in BMC_ACCESS_TRANSIENT_STATUSES + + return await wait_for_resource_async( + fetcher=self.get_bmc_access, + options=options, + args={ + "server_id": server_id, + "zone": zone, + }, + ) + + async def stop_bmc_access( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Stop BMC (Baseboard Management Controller) access for a given baremetal server. + Stop BMC (Baseboard Management Controller) access associated with the given ID. + :param server_id: ID of the server to stop BMC access. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.stop_bmc_access( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/bmc-access", + ) + + self._throw_on_error(res) + + async def list_offers( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListOffersRequestOrderBy] = None, + commercial_range: Optional[str] = None, + catalog: Optional[OfferCatalog] = None, + project_id: Optional[str] = None, + is_failover_ip: Optional[bool] = None, + is_failover_block: Optional[bool] = None, + sold_in: Optional[List[str]] = None, + available_only: Optional[bool] = None, + is_rpn_san: Optional[bool] = None, + ) -> ListOffersResponse: + """ + List offers. + List all available server offers. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of offer per page. + :param order_by: Order of the offers. + :param commercial_range: Filter on commercial range. + :param catalog: Filter on catalog. + :param project_id: Project ID. + :param is_failover_ip: Get the current failover IP offer. + :param is_failover_block: Get the current failover IP block offer. + :param sold_in: Filter offers depending on their datacenter. + :param available_only: Set this filter to true to only return available offers. + :param is_rpn_san: Get the RPN SAN offers. 
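A typical BMC session with the methods above looks like the sketch below, again reusing the `api` instance from the first example; the authorized IP is an illustrative value::

    async def bmc_session(api, server_id: int) -> None:
        # Authorize an IP to reach the BMC; access only becomes available ~1 hour after install.
        await api.start_bmc_access(server_id=server_id, ip="203.0.113.10")

        # Wait until the BMC access leaves its transient statuses, then inspect it.
        access = await api.wait_for_bmc_access(server_id=server_id)
        print(access.status)

        # Revoke the access once finished.
        await api.stop_bmc_access(server_id=server_id)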
+ :return: :class:`ListOffersResponse ` + + Usage: + :: + + result = await api.list_offers() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/offers", + params={ + "available_only": available_only, + "catalog": catalog, + "commercial_range": commercial_range, + "is_failover_block": is_failover_block, + "is_failover_ip": is_failover_ip, + "is_rpn_san": is_rpn_san, + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "sold_in": ",".join(sold_in) if sold_in and len(sold_in) > 0 else None, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListOffersResponse(res.json()) + + async def list_offers_all( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListOffersRequestOrderBy] = None, + commercial_range: Optional[str] = None, + catalog: Optional[OfferCatalog] = None, + project_id: Optional[str] = None, + is_failover_ip: Optional[bool] = None, + is_failover_block: Optional[bool] = None, + sold_in: Optional[List[str]] = None, + available_only: Optional[bool] = None, + is_rpn_san: Optional[bool] = None, + ) -> List[Offer]: + """ + List offers. + List all available server offers. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of offer per page. + :param order_by: Order of the offers. + :param commercial_range: Filter on commercial range. + :param catalog: Filter on catalog. + :param project_id: Project ID. + :param is_failover_ip: Get the current failover IP offer. + :param is_failover_block: Get the current failover IP block offer. + :param sold_in: Filter offers depending on their datacenter. + :param available_only: Set this filter to true to only return available offers. + :param is_rpn_san: Get the RPN SAN offers. + :return: :class:`List[Offer] ` + + Usage: + :: + + result = await api.list_offers_all() + """ + + return await fetch_all_pages_async( + type=ListOffersResponse, + key="offers", + fetcher=self.list_offers, + args={ + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "commercial_range": commercial_range, + "catalog": catalog, + "project_id": project_id, + "is_failover_ip": is_failover_ip, + "is_failover_block": is_failover_block, + "sold_in": sold_in, + "available_only": available_only, + "is_rpn_san": is_rpn_san, + }, + ) + + async def get_offer( + self, + *, + offer_id: int, + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + ) -> Offer: + """ + Get offer. + Return specific offer for the given ID. + :param offer_id: ID of offer. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID. 
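Since `list_offers` is paginated, callers will usually prefer the `_all` variant shown above; a small sketch, assuming the same `api` object as before::

    async def show_available_offers(api) -> None:
        # Every currently orderable offer in the default zone, across all pages.
        for offer in await api.list_offers_all(available_only=True):
            print(offer)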
+ :return: :class:`Offer ` + + Usage: + :: + + result = await api.get_offer( + offer_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_offer_id = validate_path_param("offer_id", offer_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/offers/{param_offer_id}", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_Offer(res.json()) + + async def list_os( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListOSRequestOrderBy] = None, + type_: Optional[OSType] = None, + project_id: Optional[str] = None, + ) -> ListOSResponse: + """ + List all available OS that can be install on a baremetal server. + :param server_id: Filter OS by compatible server ID. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of OS per page. + :param order_by: Order of the OS. + :param type_: Type of the OS. + :param project_id: Project ID. + :return: :class:`ListOSResponse ` + + Usage: + :: + + result = await api.list_os( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/os", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "server_id": server_id, + "type": type_, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListOSResponse(res.json()) + + async def list_os_all( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListOSRequestOrderBy] = None, + type_: Optional[OSType] = None, + project_id: Optional[str] = None, + ) -> List[OS]: + """ + List all available OS that can be install on a baremetal server. + :param server_id: Filter OS by compatible server ID. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of OS per page. + :param order_by: Order of the OS. + :param type_: Type of the OS. + :param project_id: Project ID. + :return: :class:`List[OS] ` + + Usage: + :: + + result = await api.list_os_all( + server_id=1, + ) + """ + + return await fetch_all_pages_async( + type=ListOSResponse, + key="os", + fetcher=self.list_os, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "type_": type_, + "project_id": project_id, + }, + ) + + async def get_os( + self, + *, + os_id: int, + server_id: int, + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + ) -> OS: + """ + Get an OS with a given ID. + Return specific OS for the given ID. + :param os_id: ID of the OS. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID. 
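Choosing an OS for a given server with the methods above could look like this (IDs are illustrative, `api` as in the first sketch)::

    async def pick_os(api, server_id: int) -> None:
        # All OSes that can be installed on this server, across all pages.
        for entry in await api.list_os_all(server_id=server_id):
            print(entry)

        # Full details for one candidate OS.
        print(await api.get_os(os_id=1, server_id=server_id))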
+ :return: :class:`OS ` + + Usage: + :: + + result = await api.get_os( + os_id=1, + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_os_id = validate_path_param("os_id", os_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/os/{param_os_id}", + params={ + "project_id": project_id or self.client.default_project_id, + "server_id": server_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_OS(res.json()) + + async def update_reverse( + self, + *, + ip_id: int, + reverse: str, + zone: Optional[Zone] = None, + ) -> IP: + """ + Update reverse of ip. + Update reverse of ip associated with the given ID. + :param ip_id: ID of the IP. + :param reverse: Reverse to apply on the IP. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`IP ` + + Usage: + :: + + result = await api.update_reverse( + ip_id=1, + reverse="example", + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ip_id = validate_path_param("ip_id", ip_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/reverses/{param_ip_id}", + body=marshal_UpdateReverseRequest( + UpdateReverseRequest( + ip_id=ip_id, + reverse=reverse, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IP(res.json()) + + async def create_failover_i_ps( + self, + *, + offer_id: int, + quantity: int, + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + ) -> CreateFailoverIPsResponse: + """ + Order failover IPs. + Order X failover IPs. + :param offer_id: Failover IP offer ID. + :param quantity: Quantity. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID. + :return: :class:`CreateFailoverIPsResponse ` + + Usage: + :: + + result = await api.create_failover_i_ps( + offer_id=1, + quantity=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/failover-ips", + body=marshal_CreateFailoverIPsRequest( + CreateFailoverIPsRequest( + offer_id=offer_id, + quantity=quantity, + zone=zone, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_CreateFailoverIPsResponse(res.json()) + + async def attach_failover_i_ps( + self, + *, + server_id: int, + fips_ids: List[int], + zone: Optional[Zone] = None, + ) -> None: + """ + Attach failovers on baremetal server. + Attach failovers on the server associated with the given ID. + :param server_id: ID of the server. + :param fips_ids: List of ID of failovers IP to attach. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.attach_failover_i_ps( + server_id=1, + fips_ids=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/failover-ips/attach", + body=marshal_AttachFailoverIPsRequest( + AttachFailoverIPsRequest( + server_id=server_id, + fips_ids=fips_ids, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def detach_failover_i_ps( + self, + *, + fips_ids: List[int], + zone: Optional[Zone] = None, + ) -> None: + """ + Detach failovers on baremetal server. + Detach failovers on the server associated with the given ID. 
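Ordering failover IPs and attaching them to a server chains the two calls above. The IDs of the freshly ordered IPs come from the create response (its field layout is not shown in this hunk), so they are passed explicitly as placeholders here::

    async def order_and_attach_failover(api, server_id: int, failover_offer_id: int) -> None:
        # Order two failover IPs on the chosen offer.
        await api.create_failover_i_ps(offer_id=failover_offer_id, quantity=2)

        # Attach known failover IP IDs to the server (placeholder IDs; read the real ones
        # from the order response or from list_failover_i_ps_all).
        await api.attach_failover_i_ps(server_id=server_id, fips_ids=[101, 102])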
+ :param fips_ids: List of IDs of failovers IP to detach. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.detach_failover_i_ps( + fips_ids=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/failover-ips/detach", + body=marshal_DetachFailoverIPsRequest( + DetachFailoverIPsRequest( + fips_ids=fips_ids, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def attach_failover_ip_to_mac_address( + self, + *, + ip_id: int, + zone: Optional[Zone] = None, + type_: Optional[AttachFailoverIPToMacAddressRequestMacType] = None, + mac: Optional[str] = None, + ) -> IP: + """ + Attach a failover IP to a MAC address. + :param ip_id: ID of the failover IP. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param type_: A mac type. + :param mac: A valid mac address (existing or not). + :return: :class:`IP ` + + Usage: + :: + + result = await api.attach_failover_ip_to_mac_address( + ip_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ip_id = validate_path_param("ip_id", ip_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}/attach-to-mac-address", + body=marshal_AttachFailoverIPToMacAddressRequest( + AttachFailoverIPToMacAddressRequest( + ip_id=ip_id, + zone=zone, + type_=type_, + mac=mac, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IP(res.json()) + + async def detach_failover_ip_from_mac_address( + self, + *, + ip_id: int, + zone: Optional[Zone] = None, + ) -> IP: + """ + Detach a failover IP from a MAC address. + :param ip_id: ID of the failover IP. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`IP ` + + Usage: + :: + + result = await api.detach_failover_ip_from_mac_address( + ip_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ip_id = validate_path_param("ip_id", ip_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}/detach-from-mac-address", + body={}, + ) + + self._throw_on_error(res) + return unmarshal_IP(res.json()) + + async def delete_failover_ip( + self, + *, + ip_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Delete a failover server. + Delete the failover associated with the given ID. + :param ip_id: ID of the failover IP to delete. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.delete_failover_ip( + ip_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ip_id = validate_path_param("ip_id", ip_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}", + ) + + self._throw_on_error(res) + + async def list_failover_i_ps( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListFailoverIPsRequestOrderBy] = None, + project_id: Optional[str] = None, + search: Optional[str] = None, + only_available: Optional[bool] = None, + ) -> ListFailoverIPsResponse: + """ + List failovers for project. + List failovers servers for project. + :param zone: Zone to target. 
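Binding a failover IP to a virtual MAC and releasing it again is a two-call round trip; the MAC below is purely illustrative::

    async def move_failover_to_mac(api, ip_id: int) -> None:
        # Bind the failover IP to a (new or existing) MAC address.
        ip = await api.attach_failover_ip_to_mac_address(ip_id=ip_id, mac="aa:bb:cc:dd:ee:ff")
        print(ip)

        # Release the binding later on.
        await api.detach_failover_ip_from_mac_address(ip_id=ip_id)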
If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of failovers IP per page. + :param order_by: Order of the failovers IP. + :param project_id: Filter failovers IP by project ID. + :param search: Filter failovers IP which matching with this field. + :param only_available: True: return all failovers IP not attached on server + false: return all failovers IP attached on server. + :return: :class:`ListFailoverIPsResponse ` + + Usage: + :: + + result = await api.list_failover_i_ps() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/failover-ips", + params={ + "only_available": only_available, + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "search": search, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListFailoverIPsResponse(res.json()) + + async def list_failover_i_ps_all( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListFailoverIPsRequestOrderBy] = None, + project_id: Optional[str] = None, + search: Optional[str] = None, + only_available: Optional[bool] = None, + ) -> List[FailoverIP]: + """ + List failovers for project. + List failovers servers for project. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of failovers IP per page. + :param order_by: Order of the failovers IP. + :param project_id: Filter failovers IP by project ID. + :param search: Filter failovers IP which matching with this field. + :param only_available: True: return all failovers IP not attached on server + false: return all failovers IP attached on server. + :return: :class:`List[FailoverIP] ` + + Usage: + :: + + result = await api.list_failover_i_ps_all() + """ + + return await fetch_all_pages_async( + type=ListFailoverIPsResponse, + key="failover_ips", + fetcher=self.list_failover_i_ps, + args={ + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + "search": search, + "only_available": only_available, + }, + ) + + async def get_failover_ip( + self, + *, + ip_id: int, + zone: Optional[Zone] = None, + ) -> FailoverIP: + """ + Get a specific baremetal server. + Get the server associated with the given ID. + :param ip_id: ID of the failover IP. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`FailoverIP ` + + Usage: + :: + + result = await api.get_failover_ip( + ip_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ip_id = validate_path_param("ip_id", ip_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}", + ) + + self._throw_on_error(res) + return unmarshal_FailoverIP(res.json()) + + async def get_remaining_quota( + self, + *, + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + ) -> GetRemainingQuotaResponse: + """ + Get remaining quota. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID. 
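The `only_available` filter documented above is the usual way to find failover IPs that can still be attached; a one-liner sketch with the same `api` object::

    async def show_unattached_failover_ips(api) -> None:
        # Failover IPs of the project that are not attached to any server, across all pages.
        for failover_ip in await api.list_failover_i_ps_all(only_available=True):
            print(failover_ip)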
+ :return: :class:`GetRemainingQuotaResponse ` + + Usage: + :: + + result = await api.get_remaining_quota() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/remaining-quota", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GetRemainingQuotaResponse(res.json()) + + async def get_raid( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Raid: + """ + Get raid. + Return raid for the given server ID. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Raid ` + + Usage: + :: + + result = await api.get_raid( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/raid", + ) + + self._throw_on_error(res) + return unmarshal_Raid(res.json()) + + async def update_raid( + self, + *, + server_id: int, + raid_arrays: List[UpdatableRaidArray], + zone: Optional[Zone] = None, + ) -> None: + """ + Update RAID. + Update RAID associated with the given server ID. + :param server_id: ID of the server. + :param raid_arrays: RAIDs to update. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.update_raid( + server_id=1, + raid_arrays=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/update-raid", + body=marshal_UpdateRaidRequest( + UpdateRaidRequest( + server_id=server_id, + raid_arrays=raid_arrays, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def start_rescue( + self, + *, + server_id: int, + os_id: int, + zone: Optional[Zone] = None, + ) -> Rescue: + """ + Start in rescue baremetal server. + Start in rescue the server associated with the given ID. + :param server_id: ID of the server to start rescue. + :param os_id: OS ID to use to start rescue. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Rescue ` + + Usage: + :: + + result = await api.start_rescue( + server_id=1, + os_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/rescue", + body=marshal_StartRescueRequest( + StartRescueRequest( + server_id=server_id, + os_id=os_id, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Rescue(res.json()) + + async def get_rescue( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Rescue: + """ + Get rescue information. + Return rescue information for the given server ID. + :param server_id: ID of the server to get rescue. + :param zone: Zone to target. If none is passed will use default zone from the config. 
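Quota, RAID and rescue are commonly checked together before maintenance; a sketch under the same assumptions as the first example (the rescue OS ID is illustrative)::

    async def prepare_maintenance(api, server_id: int, rescue_os_id: int) -> None:
        # Remaining project quota and current RAID layout.
        print(await api.get_remaining_quota())
        print(await api.get_raid(server_id=server_id))

        # Reboot the server on a rescue image, then read back the rescue information.
        await api.start_rescue(server_id=server_id, os_id=rescue_os_id)
        print(await api.get_rescue(server_id=server_id))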
+ :return: :class:`Rescue ` + + Usage: + :: + + result = await api.get_rescue( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/rescue", + ) + + self._throw_on_error(res) + return unmarshal_Rescue(res.json()) + + async def stop_rescue( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Stop rescue on baremetal server. + Stop rescue on the server associated with the given ID. + :param server_id: ID of the server to stop rescue. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = await api.stop_rescue( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/rescue", + ) + + self._throw_on_error(res) + + +class DediboxV1BillingAPI(API): + """ + Dedibox Phoenix Billing API. + """ + + async def list_invoices( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListInvoicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListInvoicesResponse: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`ListInvoicesResponse ` + + Usage: + :: + + result = await api.list_invoices() + """ + + res = self._request( + "GET", + "/dedibox/v1/invoices", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListInvoicesResponse(res.json()) + + async def list_invoices_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListInvoicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[InvoiceSummary]: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`List[InvoiceSummary] ` + + Usage: + :: + + result = await api.list_invoices_all() + """ + + return await fetch_all_pages_async( + type=ListInvoicesResponse, + key="invoices", + fetcher=self.list_invoices, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def get_invoice( + self, + *, + invoice_id: int, + ) -> Invoice: + """ + :param invoice_id: + :return: :class:`Invoice ` + + Usage: + :: + + result = await api.get_invoice( + invoice_id=1, + ) + """ + + param_invoice_id = validate_path_param("invoice_id", invoice_id) + + res = self._request( + "GET", + f"/dedibox/v1/invoices/{param_invoice_id}", + ) + + self._throw_on_error(res) + return unmarshal_Invoice(res.json()) + + async def download_invoice( + self, + *, + invoice_id: int, + ) -> ScwFile: + """ + :param invoice_id: + :return: :class:`ScwFile ` + + Usage: + :: + + result = await api.download_invoice( + invoice_id=1, + ) + """ + + param_invoice_id = validate_path_param("invoice_id", invoice_id) + + res = self._request( + "GET", + f"/dedibox/v1/invoices/{param_invoice_id}/download", + ) + + self._throw_on_error(res) + return unmarshal_ScwFile(res.json()) + + async def list_refunds( + self, + *, + page: Optional[int] = None, + 
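The billing API introduced above is instantiated like the other API classes in this file; a sketch, assuming the same client object and import style as the first example (the invoice ID is illustrative)::

    async def latest_invoices(client) -> None:
        billing = DediboxV1BillingAPI(client)  # import assumed from the same module

        # Every invoice summary for the default project, across all pages.
        for invoice in await billing.list_invoices_all():
            print(invoice)

        # Full invoice details plus its downloadable file (a ScwFile).
        print(await billing.get_invoice(invoice_id=1))
        await billing.download_invoice(invoice_id=1)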
page_size: Optional[int] = None, + order_by: Optional[ListRefundsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRefundsResponse: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`ListRefundsResponse ` + + Usage: + :: + + result = await api.list_refunds() + """ + + res = self._request( + "GET", + "/dedibox/v1/refunds", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRefundsResponse(res.json()) + + async def list_refunds_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRefundsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RefundSummary]: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`List[RefundSummary] ` + + Usage: + :: + + result = await api.list_refunds_all() + """ + + return await fetch_all_pages_async( + type=ListRefundsResponse, + key="refunds", + fetcher=self.list_refunds, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def get_refund( + self, + *, + refund_id: int, + ) -> Refund: + """ + :param refund_id: + :return: :class:`Refund ` + + Usage: + :: + + result = await api.get_refund( + refund_id=1, + ) + """ + + param_refund_id = validate_path_param("refund_id", refund_id) + + res = self._request( + "GET", + f"/dedibox/v1/refunds/{param_refund_id}", + ) + + self._throw_on_error(res) + return unmarshal_Refund(res.json()) + + async def download_refund( + self, + *, + refund_id: int, + ) -> ScwFile: + """ + :param refund_id: + :return: :class:`ScwFile ` + + Usage: + :: + + result = await api.download_refund( + refund_id=1, + ) + """ + + param_refund_id = validate_path_param("refund_id", refund_id) + + res = self._request( + "GET", + f"/dedibox/v1/refunds/{param_refund_id}/download", + ) + + self._throw_on_error(res) + return unmarshal_ScwFile(res.json()) + + async def can_order( + self, + *, + project_id: Optional[str] = None, + ) -> CanOrderResponse: + """ + :param project_id: + :return: :class:`CanOrderResponse ` + + Usage: + :: + + result = await api.can_order() + """ + + res = self._request( + "GET", + "/dedibox/v1/can-order", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_CanOrderResponse(res.json()) + + +class DediboxV1IPv6BlockAPI(API): + """ + Dedibox Phoenix IPv6 Block API. + """ + + async def get_i_pv6_block_quotas( + self, + *, + project_id: Optional[str] = None, + ) -> GetIPv6BlockQuotasResponse: + """ + Get IPv6 block quota. + Get IPv6 block quota with the given project ID. + /48 one per organization. + /56 link to your number of server. + /64 link to your number of failover IP. + :param project_id: ID of the project. + :return: :class:`GetIPv6BlockQuotasResponse ` + + Usage: + :: + + result = await api.get_i_pv6_block_quotas() + """ + + res = self._request( + "GET", + "/dedibox/v1/ipv6-block-quotas", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GetIPv6BlockQuotasResponse(res.json()) + + async def create_i_pv6_block( + self, + *, + project_id: Optional[str] = None, + ) -> IPv6Block: + """ + Create IPv6 block for baremetal server. 
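Refund listing and the order pre-check work the same way; another short sketch against the billing API, client construction as before::

    async def refunds_and_order_check(client) -> None:
        billing = DediboxV1BillingAPI(client)  # import assumed from the same module

        # All refunds attached to the default project, across all pages.
        for refund in await billing.list_refunds_all():
            print(refund)

        # Whether the project is currently allowed to place new Dedibox orders.
        print(await billing.can_order())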
+ Create IPv6 block associated with the given project ID. + :param project_id: ID of the project. + :return: :class:`IPv6Block ` + + Usage: + :: + + result = await api.create_i_pv6_block() + """ + + res = self._request( + "POST", + "/dedibox/v1/ipv6-block", + body=marshal_IPv6BlockApiCreateIPv6BlockRequest( + IPv6BlockApiCreateIPv6BlockRequest( + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + async def get_i_pv6_block( + self, + *, + project_id: Optional[str] = None, + ) -> IPv6Block: + """ + Get a specific IPv6 block. + Get the IPv6 block associated with the given ID. + :param project_id: ID of the project. + :return: :class:`IPv6Block ` + + Usage: + :: + + result = await api.get_i_pv6_block() + """ + + res = self._request( + "GET", + "/dedibox/v1/ipv6-block", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + async def update_i_pv6_block( + self, + *, + block_id: int, + nameservers: Optional[List[str]] = None, + ) -> IPv6Block: + """ + Update IPv6 block. + Update DNS associated to IPv6 block. + If DNS is used, minimum of 2 is necessary and maximum of 5 (no duplicate). + :param block_id: ID of the IPv6 block. + :param nameservers: DNS to link to the IPv6. + :return: :class:`IPv6Block ` + + Usage: + :: + + result = await api.update_i_pv6_block( + block_id=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/ipv6-blocks/{param_block_id}", + body=marshal_IPv6BlockApiUpdateIPv6BlockRequest( + IPv6BlockApiUpdateIPv6BlockRequest( + block_id=block_id, + nameservers=nameservers, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + async def delete_i_pv6_block( + self, + *, + block_id: int, + ) -> None: + """ + Delete IPv6 block. + Delete IPv6 block subnet with the given ID. + :param block_id: ID of the IPv6 block to delete. + + Usage: + :: + + result = await api.delete_i_pv6_block( + block_id=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/ipv6-blocks/{param_block_id}", + ) + + self._throw_on_error(res) + + async def create_i_pv6_block_subnet( + self, + *, + block_id: int, + address: str, + cidr: int, + ) -> IPv6Block: + """ + Create IPv6 block subnet. + Create IPv6 block subnet for the given IP ID. + /48 could create subnet in /56 (quota link to your number of server). + /56 could create subnet in /64 (quota link to your number of failover IP). + :param block_id: ID of the IPv6 block. + :param address: Address of the IPv6. + :param cidr: Classless InterDomain Routing notation of the IPv6. + :return: :class:`IPv6Block ` + + Usage: + :: + + result = await api.create_i_pv6_block_subnet( + block_id=1, + address="example", + cidr=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "POST", + f"/dedibox/v1/ipv6-blocks/{param_block_id}/subnets", + body=marshal_IPv6BlockApiCreateIPv6BlockSubnetRequest( + IPv6BlockApiCreateIPv6BlockSubnetRequest( + block_id=block_id, + address=address, + cidr=cidr, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + async def list_i_pv6_block_subnets_available( + self, + *, + block_id: int, + ) -> ListIPv6BlockSubnetsAvailableResponse: + """ + List available IPv6 block subnets. 
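A plausible IPv6 block workflow with the methods above: check quotas, order the block, delegate DNS, then carve out a subnet. The block ID, nameservers and prefix below are illustrative placeholders (documentation addresses), not values from this patch::

    async def ipv6_block_setup(client) -> None:
        ipv6 = DediboxV1IPv6BlockAPI(client)  # import assumed from the same module

        # Check the /48, /56 and /64 quotas, then order the project's IPv6 block.
        print(await ipv6.get_i_pv6_block_quotas())
        print(await ipv6.create_i_pv6_block())

        # Delegate DNS (2 to 5 distinct nameservers) and create a /56 subnet in the block.
        await ipv6.update_i_pv6_block(block_id=1, nameservers=["2001:db8::53", "2001:db8:1::53"])
        await ipv6.create_i_pv6_block_subnet(block_id=1, address="2001:db8:2::", cidr=56)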
+ List all available IPv6 block subnets for given IP ID. + :param block_id: ID of the IPv6 block. + :return: :class:`ListIPv6BlockSubnetsAvailableResponse ` + + Usage: + :: + + result = await api.list_i_pv6_block_subnets_available( + block_id=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "GET", + f"/dedibox/v1/ipv6-blocks/{param_block_id}/subnets", + ) + + self._throw_on_error(res) + return unmarshal_ListIPv6BlockSubnetsAvailableResponse(res.json()) + + +class DediboxV1RpnAPI(API): + """ + Dedibox Phoenix RPN API. + """ + + async def list_rpn_server_capabilities( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnServerCapabilitiesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnServerCapabilitiesResponse: + """ + :param page: Page number. + :param page_size: Number of servers per page. + :param order_by: Order of the servers. + :param project_id: Filter servers by project ID. + :return: :class:`ListRpnServerCapabilitiesResponse ` + + Usage: + :: + + result = await api.list_rpn_server_capabilities() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpn/server-capabilities", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnServerCapabilitiesResponse(res.json()) + + async def list_rpn_server_capabilities_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnServerCapabilitiesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnServerCapability]: + """ + :param page: Page number. + :param page_size: Number of servers per page. + :param order_by: Order of the servers. + :param project_id: Filter servers by project ID. + :return: :class:`List[RpnServerCapability] ` + + Usage: + :: + + result = await api.list_rpn_server_capabilities_all() + """ + + return await fetch_all_pages_async( + type=ListRpnServerCapabilitiesResponse, + key="servers", + fetcher=self.list_rpn_server_capabilities, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def get_rpn_status( + self, + *, + project_id: Optional[str] = None, + rpnv1_group_id: Optional[int] = None, + rpnv2_group_id: Optional[int] = None, + ) -> GetRpnStatusResponse: + """ + :param project_id: A project ID. + :param rpnv1_group_id: An RPN v1 group ID. + :param rpnv2_group_id: An RPN v2 group ID. + :return: :class:`GetRpnStatusResponse ` + + Usage: + :: + + result = await api.get_rpn_status() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpn/status", + params={ + "project_id": project_id or self.client.default_project_id, + "rpnv1_group_id": rpnv1_group_id, + "rpnv2_group_id": rpnv2_group_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GetRpnStatusResponse(res.json()) + + +class DediboxV1RpnSanAPI(API): + """ + Dedibox Phoenix RPN SAN API. + """ + + async def list_rpn_sans( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnSansRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnSansResponse: + """ + :param page: Page number. + :param page_size: Number of RPN SANs per page. + :param order_by: Order of the RPN SANs. + :param project_id: Filter RPN SANs by project ID. 
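The RPN API above is mostly read-only; a quick overview sketch, same client assumptions as earlier::

    async def rpn_overview(client) -> None:
        rpn = DediboxV1RpnAPI(client)  # import assumed from the same module

        # Servers of the project that are able to join RPN groups, across all pages.
        for server in await rpn.list_rpn_server_capabilities_all():
            print(server)

        # Aggregated RPN status for the project.
        print(await rpn.get_rpn_status())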
+ :return: :class:`ListRpnSansResponse ` + + Usage: + :: + + result = await api.list_rpn_sans() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpn-sans", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnSansResponse(res.json()) + + async def list_rpn_sans_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnSansRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnSanSummary]: + """ + :param page: Page number. + :param page_size: Number of RPN SANs per page. + :param order_by: Order of the RPN SANs. + :param project_id: Filter RPN SANs by project ID. + :return: :class:`List[RpnSanSummary] ` + + Usage: + :: + + result = await api.list_rpn_sans_all() + """ + + return await fetch_all_pages_async( + type=ListRpnSansResponse, + key="rpn_sans", + fetcher=self.list_rpn_sans, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def get_rpn_san( + self, + *, + rpn_san_id: int, + ) -> RpnSan: + """ + :param rpn_san_id: RPN SAN ID. + :return: :class:`RpnSan ` + + Usage: + :: + + result = await api.get_rpn_san( + rpn_san_id=1, + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}", + ) + + self._throw_on_error(res) + return unmarshal_RpnSan(res.json()) + + async def wait_for_rpn_san( + self, + *, + rpn_san_id: int, + options: Optional[WaitForOptions[RpnSan, Union[bool, Awaitable[bool]]]] = None, + ) -> RpnSan: + """ + :param rpn_san_id: RPN SAN ID. + :return: :class:`RpnSan ` + + Usage: + :: + + result = await api.get_rpn_san( + rpn_san_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in RPN_SAN_TRANSIENT_STATUSES + + return await wait_for_resource_async( + fetcher=self.get_rpn_san, + options=options, + args={ + "rpn_san_id": rpn_san_id, + }, + ) + + async def delete_rpn_san( + self, + *, + rpn_san_id: int, + ) -> None: + """ + :param rpn_san_id: RPN SAN ID. + + Usage: + :: + + result = await api.delete_rpn_san( + rpn_san_id=1, + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}", + ) + + self._throw_on_error(res) + + async def create_rpn_san( + self, + *, + offer_id: int, + project_id: Optional[str] = None, + ) -> Service: + """ + :param offer_id: Offer ID. + :param project_id: Your project ID. + :return: :class:`Service ` + + Usage: + :: + + result = await api.create_rpn_san( + offer_id=1, + ) + """ + + res = self._request( + "POST", + "/dedibox/v1/rpn-sans", + body=marshal_RpnSanApiCreateRpnSanRequest( + RpnSanApiCreateRpnSanRequest( + offer_id=offer_id, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + async def list_ips( + self, + *, + rpn_san_id: int, + type_: Optional[RpnSanIpType] = None, + ) -> ListIpsResponse: + """ + :param rpn_san_id: RPN SAN ID. + :param type_: Filter by IP type (server | rpnv2_subnet). 
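Ordering an RPN SAN and waiting for it to become usable combines `create_rpn_san` and `wait_for_rpn_san`. The SAN ID used for the wait comes from the create response (not detailed in this hunk), so an illustrative ID stands in for it below::

    async def order_rpn_san(client, san_offer_id: int) -> None:
        san_api = DediboxV1RpnSanAPI(client)  # import assumed from the same module

        # Order the SAN on the chosen offer.
        await san_api.create_rpn_san(offer_id=san_offer_id)

        # Block until the SAN leaves its transient statuses (ID is a placeholder).
        san = await san_api.wait_for_rpn_san(rpn_san_id=1)
        print(san.status)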
+ :return: :class:`ListIpsResponse ` + + Usage: + :: + + result = await api.list_ips( + rpn_san_id=1, + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/ips", + params={ + "type": type_, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListIpsResponse(res.json()) + + async def add_ip( + self, + *, + rpn_san_id: int, + ip_ids: List[int], + ) -> None: + """ + :param rpn_san_id: RPN SAN ID. + :param ip_ids: An array of IP ID. + + Usage: + :: + + result = await api.add_ip( + rpn_san_id=1, + ip_ids=[], + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/ips", + body=marshal_RpnSanApiAddIpRequest( + RpnSanApiAddIpRequest( + rpn_san_id=rpn_san_id, + ip_ids=ip_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def remove_ip( + self, + *, + rpn_san_id: int, + ip_ids: List[int], + ) -> None: + """ + :param rpn_san_id: RPN SAN ID. + :param ip_ids: An array of IP ID. + + Usage: + :: + + result = await api.remove_ip( + rpn_san_id=1, + ip_ids=[], + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/ips", + body=marshal_RpnSanApiRemoveIpRequest( + RpnSanApiRemoveIpRequest( + rpn_san_id=rpn_san_id, + ip_ids=ip_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def list_available_ips( + self, + *, + rpn_san_id: int, + type_: Optional[RpnSanIpType] = None, + ) -> ListIpsResponse: + """ + :param rpn_san_id: RPN SAN ID. + :param type_: Filter by IP type (server | rpnv2_subnet). + :return: :class:`ListIpsResponse ` + + Usage: + :: + + result = await api.list_available_ips( + rpn_san_id=1, + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/available-ips", + params={ + "type": type_, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListIpsResponse(res.json()) + + +class DediboxV1RpnV1API(API): + """ + Dedibox Phoenix RPN v1 API. + """ + + async def list_rpn_groups( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnGroupsResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v1 groups per page. + :param order_by: Order of the rpn v1 groups. + :param project_id: Filter rpn v1 groups by project ID. + :return: :class:`ListRpnGroupsResponse ` + + Usage: + :: + + result = await api.list_rpn_groups() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/groups", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnGroupsResponse(res.json()) + + async def list_rpn_groups_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnGroup]: + """ + :param page: Page number. + :param page_size: Number of rpn v1 groups per page. + :param order_by: Order of the rpn v1 groups. + :param project_id: Filter rpn v1 groups by project ID. 
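Granting and revoking SAN access for a set of IPs is a simple allow-list round trip with the methods above; the IP IDs are passed in by the caller in this sketch::

    async def allow_ips_on_san(client, rpn_san_id: int, ip_ids) -> None:
        san_api = DediboxV1RpnSanAPI(client)  # import assumed from the same module

        # IPs that could still be granted access to this SAN.
        print(await san_api.list_available_ips(rpn_san_id=rpn_san_id))

        # Grant access, inspect the allow-list, then revoke it again.
        await san_api.add_ip(rpn_san_id=rpn_san_id, ip_ids=ip_ids)
        print(await san_api.list_ips(rpn_san_id=rpn_san_id))
        await san_api.remove_ip(rpn_san_id=rpn_san_id, ip_ids=ip_ids)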
+ :return: :class:`List[RpnGroup] ` + + Usage: + :: + + result = await api.list_rpn_groups_all() + """ + + return await fetch_all_pages_async( + type=ListRpnGroupsResponse, + key="rpn_groups", + fetcher=self.list_rpn_groups, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def get_rpn_group( + self, + *, + group_id: int, + ) -> RpnGroup: + """ + :param group_id: Rpn v1 group ID. + :return: :class:`RpnGroup ` + + Usage: + :: + + result = await api.get_rpn_group( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv1/groups/{param_group_id}", + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + async def create_rpn_group( + self, + *, + name: str, + server_ids: Optional[List[int]] = None, + san_server_ids: Optional[List[int]] = None, + project_id: Optional[str] = None, + ) -> RpnGroup: + """ + :param name: Rpn v1 group name. + :param server_ids: A collection of rpn v1 capable servers. + :param san_server_ids: A collection of rpn v1 capable rpn sans servers. + :param project_id: A project ID. + :return: :class:`RpnGroup ` + + Usage: + :: + + result = await api.create_rpn_group( + name="example", + ) + """ + + res = self._request( + "POST", + "/dedibox/v1/rpnv1/groups", + body=marshal_RpnV1ApiCreateRpnGroupRequest( + RpnV1ApiCreateRpnGroupRequest( + name=name, + server_ids=server_ids, + san_server_ids=san_server_ids, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + async def delete_rpn_group( + self, + *, + group_id: int, + ) -> None: + """ + :param group_id: Rpn v1 group ID. + + Usage: + :: + + result = await api.delete_rpn_group( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpnv1/groups/{param_group_id}", + ) + + self._throw_on_error(res) + + async def update_rpn_group_name( + self, + *, + group_id: int, + name: Optional[str] = None, + ) -> RpnGroup: + """ + :param group_id: Rpn v1 group ID. + :param name: New rpn v1 group name. + :return: :class:`RpnGroup ` + + Usage: + :: + + result = await api.update_rpn_group_name( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/rpnv1/groups/{param_group_id}", + body=marshal_RpnV1ApiUpdateRpnGroupNameRequest( + RpnV1ApiUpdateRpnGroupNameRequest( + group_id=group_id, + name=name, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + async def list_rpn_group_members( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupMembersRequestOrderBy] = None, + group_id: int, + project_id: Optional[str] = None, + ) -> ListRpnGroupMembersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v1 group members per page. + :param order_by: Order of the rpn v1 group members. + :param group_id: Filter rpn v1 group members by group ID. + :param project_id: A project ID. 
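Creating an RPN v1 group only needs a name and some RPN-capable server IDs; a sketch with placeholder IDs and the usual client assumptions::

    async def create_v1_group(client) -> None:
        rpnv1 = DediboxV1RpnV1API(client)  # import assumed from the same module

        # Create a group from two RPN-capable servers (IDs are illustrative).
        group = await rpnv1.create_rpn_group(name="my-rpn-group", server_ids=[1, 2])
        print(group)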
+ :return: :class:`ListRpnGroupMembersResponse ` + + Usage: + :: + + result = await api.list_rpn_group_members( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/members", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnGroupMembersResponse(res.json()) + + async def list_rpn_group_members_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupMembersRequestOrderBy] = None, + group_id: int, + project_id: Optional[str] = None, + ) -> List[RpnGroupMember]: + """ + :param page: Page number. + :param page_size: Number of rpn v1 group members per page. + :param order_by: Order of the rpn v1 group members. + :param group_id: Filter rpn v1 group members by group ID. + :param project_id: A project ID. + :return: :class:`List[RpnGroupMember] ` + + Usage: + :: + + result = await api.list_rpn_group_members_all( + group_id=1, + ) + """ + + return await fetch_all_pages_async( + type=ListRpnGroupMembersResponse, + key="members", + fetcher=self.list_rpn_group_members, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "group_id": group_id, + "project_id": project_id, + }, + ) + + async def rpn_group_invite( + self, + *, + group_id: int, + server_ids: List[int], + project_id: Optional[str] = None, + ) -> None: + """ + :param group_id: The RPN V1 group ID. + :param server_ids: A collection of external server IDs. + :param project_id: A project ID. + + Usage: + :: + + result = await api.rpn_group_invite( + group_id=1, + server_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/invite", + body=marshal_RpnV1ApiRpnGroupInviteRequest( + RpnV1ApiRpnGroupInviteRequest( + group_id=group_id, + server_ids=server_ids, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def leave_rpn_group( + self, + *, + group_id: int, + member_ids: List[int], + project_id: Optional[str] = None, + ) -> None: + """ + :param group_id: The RPN V1 group ID. + :param member_ids: A collection of rpn v1 group members IDs. + :param project_id: A project ID. + + Usage: + :: + + result = await api.leave_rpn_group( + group_id=1, + member_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/leave", + body=marshal_RpnV1ApiLeaveRpnGroupRequest( + RpnV1ApiLeaveRpnGroupRequest( + group_id=group_id, + member_ids=member_ids, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def add_rpn_group_members( + self, + *, + group_id: int, + server_ids: Optional[List[int]] = None, + san_server_ids: Optional[List[int]] = None, + ) -> RpnGroup: + """ + :param group_id: The rpn v1 group ID. + :param server_ids: A collection of rpn v1 capable server IDs. + :param san_server_ids: A collection of rpn v1 capable RPN SAN server IDs. 
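Inviting servers from another account and reviewing the membership uses the two calls above; IDs are placeholders::

    async def grow_v1_group(client, group_id: int) -> None:
        rpnv1 = DediboxV1RpnV1API(client)  # import assumed from the same module

        # Invite an external server into the group.
        await rpnv1.rpn_group_invite(group_id=group_id, server_ids=[3])

        # Current membership, across all pages.
        for member in await rpnv1.list_rpn_group_members_all(group_id=group_id):
            print(member)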
+ :return: :class:`RpnGroup ` + + Usage: + :: + + result = await api.add_rpn_group_members( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/members", + body=marshal_RpnV1ApiAddRpnGroupMembersRequest( + RpnV1ApiAddRpnGroupMembersRequest( + group_id=group_id, + server_ids=server_ids, + san_server_ids=san_server_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + async def delete_rpn_group_members( + self, + *, + group_id: int, + member_ids: List[int], + ) -> RpnGroup: + """ + :param group_id: The rpn v1 group ID. + :param member_ids: A collection of rpn v1 group members IDs. + :return: :class:`RpnGroup ` + + Usage: + :: + + result = await api.delete_rpn_group_members( + group_id=1, + member_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/members", + body=marshal_RpnV1ApiDeleteRpnGroupMembersRequest( + RpnV1ApiDeleteRpnGroupMembersRequest( + group_id=group_id, + member_ids=member_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + async def list_rpn_capable_servers( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnCapableServersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`ListRpnCapableServersResponse ` + + Usage: + :: + + result = await api.list_rpn_capable_servers() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/capable-servers", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnCapableServersResponse(res.json()) + + async def list_rpn_capable_servers_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[Server]: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`List[Server] ` + + Usage: + :: + + result = await api.list_rpn_capable_servers_all() + """ + + return await fetch_all_pages_async( + type=ListRpnCapableServersResponse, + key="servers", + fetcher=self.list_rpn_capable_servers, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def list_rpn_capable_san_servers( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableSanServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnCapableSanServersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. 
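Before adding members it is worth listing what is actually RPN-capable; a short sketch, same assumptions as above and placeholder IDs::

    async def add_capable_servers(client, group_id: int) -> None:
        rpnv1 = DediboxV1RpnV1API(client)  # import assumed from the same module

        # Servers of the project that can join RPN v1 groups, across all pages.
        print(await rpnv1.list_rpn_capable_servers_all())

        # Add a server and an RPN SAN server to an existing group.
        await rpnv1.add_rpn_group_members(group_id=group_id, server_ids=[1], san_server_ids=[2])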
+ :return: :class:`ListRpnCapableSanServersResponse ` + + Usage: + :: + + result = await api.list_rpn_capable_san_servers() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/capable-san-servers", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnCapableSanServersResponse(res.json()) + + async def list_rpn_capable_san_servers_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableSanServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnSanServer]: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`List[RpnSanServer] ` + + Usage: + :: + + result = await api.list_rpn_capable_san_servers_all() + """ + + return await fetch_all_pages_async( + type=ListRpnCapableSanServersResponse, + key="san_servers", + fetcher=self.list_rpn_capable_san_servers, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def list_rpn_invites( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnInvitesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnInvitesResponse: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`ListRpnInvitesResponse ` + + Usage: + :: + + result = await api.list_rpn_invites() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/invites", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnInvitesResponse(res.json()) + + async def list_rpn_invites_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnInvitesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnGroupMember]: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`List[RpnGroupMember] ` + + Usage: + :: + + result = await api.list_rpn_invites_all() + """ + + return await fetch_all_pages_async( + type=ListRpnInvitesResponse, + key="members", + fetcher=self.list_rpn_invites, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def accept_rpn_invite( + self, + *, + member_id: int, + ) -> None: + """ + :param member_id: The member ID. + + Usage: + :: + + result = await api.accept_rpn_invite( + member_id=1, + ) + """ + + param_member_id = validate_path_param("member_id", member_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/invites/{param_member_id}/accept", + ) + + self._throw_on_error(res) + + async def refuse_rpn_invite( + self, + *, + member_id: int, + ) -> None: + """ + :param member_id: The member ID. 
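Handling pending RPN v1 invitations is just a list plus an accept (or refuse) by member ID; the ID below is illustrative::

    async def handle_invites(client) -> None:
        rpnv1 = DediboxV1RpnV1API(client)  # import assumed from the same module

        # Pending invitations for the project, across all pages.
        for invite in await rpnv1.list_rpn_invites_all():
            print(invite)

        # Accept one of them by its member ID.
        await rpnv1.accept_rpn_invite(member_id=1)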
+ + Usage: + :: + + result = await api.refuse_rpn_invite( + member_id=1, + ) + """ + + param_member_id = validate_path_param("member_id", member_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/invites/{param_member_id}/refuse", + ) + + self._throw_on_error(res) + + +class DediboxV1RpnV2API(API): + """ + Dedibox Phoenix RPN v2 API. + """ + + async def list_rpn_v2_groups( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnV2GroupsResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 groups per page. + :param order_by: Order of the rpn v2 groups. + :param project_id: Filter rpn v2 groups by project ID. + :return: :class:`ListRpnV2GroupsResponse ` + + Usage: + :: + + result = await api.list_rpn_v2_groups() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv2/groups", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2GroupsResponse(res.json()) + + async def list_rpn_v2_groups_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnV2Group]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 groups per page. + :param order_by: Order of the rpn v2 groups. + :param project_id: Filter rpn v2 groups by project ID. + :return: :class:`List[RpnV2Group] ` + + Usage: + :: + + result = await api.list_rpn_v2_groups_all() + """ + + return await fetch_all_pages_async( + type=ListRpnV2GroupsResponse, + key="rpn_groups", + fetcher=self.list_rpn_v2_groups, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def list_rpn_v2_members( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2MembersRequestOrderBy] = None, + group_id: int, + type_: Optional[ListRpnV2MembersRequestType] = None, + ) -> ListRpnV2MembersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group members per page. + :param order_by: Order of the rpn v2 group members. + :param group_id: RPN V2 group ID. + :param type_: Filter members by type. + :return: :class:`ListRpnV2MembersResponse ` + + Usage: + :: + + result = await api.list_rpn_v2_members( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/members", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "type": type_, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2MembersResponse(res.json()) + + async def list_rpn_v2_members_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2MembersRequestOrderBy] = None, + group_id: int, + type_: Optional[ListRpnV2MembersRequestType] = None, + ) -> List[RpnV2Member]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group members per page. + :param order_by: Order of the rpn v2 group members. + :param group_id: RPN V2 group ID. + :param type_: Filter members by type. 
+ :return: :class:`List[RpnV2Member] ` + + Usage: + :: + + result = await api.list_rpn_v2_members_all( + group_id=1, + ) + """ + + return await fetch_all_pages_async( + type=ListRpnV2MembersResponse, + key="members", + fetcher=self.list_rpn_v2_members, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "group_id": group_id, + "type_": type_, + }, + ) + + async def get_rpn_v2_group( + self, + *, + group_id: int, + ) -> RpnV2Group: + """ + :param group_id: RPN V2 group ID. + :return: :class:`RpnV2Group ` + + Usage: + :: + + result = await api.get_rpn_v2_group( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv2/groups/{param_group_id}", + ) + + self._throw_on_error(res) + return unmarshal_RpnV2Group(res.json()) + + async def wait_for_rpn_v2_group( + self, + *, + group_id: int, + options: Optional[ + WaitForOptions[RpnV2Group, Union[bool, Awaitable[bool]]] + ] = None, + ) -> RpnV2Group: + """ + :param group_id: RPN V2 group ID. + :return: :class:`RpnV2Group ` + + Usage: + :: + + result = await api.get_rpn_v2_group( + group_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in RPN_V2_GROUP_TRANSIENT_STATUSES + + return await wait_for_resource_async( + fetcher=self.get_rpn_v2_group, + options=options, + args={ + "group_id": group_id, + }, + ) + + async def create_rpn_v2_group( + self, + *, + name: str, + servers: List[int], + project_id: Optional[str] = None, + type_: Optional[RpnV2GroupType] = None, + ) -> RpnV2Group: + """ + :param name: RPN V2 group name. + :param servers: A collection of server IDs. + :param project_id: Project ID of the RPN V2 group. + :param type_: RPN V2 group type (qing / standard). + :return: :class:`RpnV2Group ` + + Usage: + :: + + result = await api.create_rpn_v2_group( + name="example", + servers=[], + ) + """ + + res = self._request( + "POST", + "/dedibox/v1/rpnv2/groups", + body=marshal_RpnV2ApiCreateRpnV2GroupRequest( + RpnV2ApiCreateRpnV2GroupRequest( + name=name, + servers=servers, + project_id=project_id, + type_=type_, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnV2Group(res.json()) + + async def delete_rpn_v2_group( + self, + *, + group_id: int, + ) -> None: + """ + :param group_id: RPN V2 group ID. + + Usage: + :: + + result = await api.delete_rpn_v2_group( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpnv2/groups/{param_group_id}", + ) + + self._throw_on_error(res) + + async def update_rpn_v2_group_name( + self, + *, + group_id: int, + name: Optional[str] = None, + ) -> RpnV2Group: + """ + :param group_id: RPN V2 group ID. + :param name: RPN V2 group name. + :return: :class:`RpnV2Group ` + + Usage: + :: + + result = await api.update_rpn_v2_group_name( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/rpnv2/groups/{param_group_id}", + body=marshal_RpnV2ApiUpdateRpnV2GroupNameRequest( + RpnV2ApiUpdateRpnV2GroupNameRequest( + group_id=group_id, + name=name, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnV2Group(res.json()) + + async def add_rpn_v2_members( + self, + *, + group_id: int, + servers: List[int], + ) -> None: + """ + :param group_id: RPN V2 group ID. + :param servers: A collection of server IDs. 
+ + Usage: + :: + + result = await api.add_rpn_v2_members( + group_id=1, + servers=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/members", + body=marshal_RpnV2ApiAddRpnV2MembersRequest( + RpnV2ApiAddRpnV2MembersRequest( + group_id=group_id, + servers=servers, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def delete_rpn_v2_members( + self, + *, + group_id: int, + member_ids: List[int], + ) -> None: + """ + :param group_id: RPN V2 group ID. + :param member_ids: A collection of member IDs. + + Usage: + :: + + result = await api.delete_rpn_v2_members( + group_id=1, + member_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/members", + body=marshal_RpnV2ApiDeleteRpnV2MembersRequest( + RpnV2ApiDeleteRpnV2MembersRequest( + group_id=group_id, + member_ids=member_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def list_rpn_v2_capable_resources( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2CapableResourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnV2CapableResourcesResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 capable resources per page. + :param order_by: Order of the rpn v2 capable resources. + :param project_id: Filter rpn v2 capable resources by project ID. + :return: :class:`ListRpnV2CapableResourcesResponse ` + + Usage: + :: + + result = await api.list_rpn_v2_capable_resources() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv2/groups/capable", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2CapableResourcesResponse(res.json()) + + async def list_rpn_v2_capable_resources_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2CapableResourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[Server]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 capable resources per page. + :param order_by: Order of the rpn v2 capable resources. + :param project_id: Filter rpn v2 capable resources by project ID. + :return: :class:`List[Server] ` + + Usage: + :: + + result = await api.list_rpn_v2_capable_resources_all() + """ + + return await fetch_all_pages_async( + type=ListRpnV2CapableResourcesResponse, + key="servers", + fetcher=self.list_rpn_v2_capable_resources, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def list_rpn_v2_group_logs( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupLogsRequestOrderBy] = None, + group_id: int, + ) -> ListRpnV2GroupLogsResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group logs per page. + :param order_by: Order of the rpn v2 group logs. + :param group_id: RPN V2 group ID. 
+ :return: :class:`ListRpnV2GroupLogsResponse ` + + Usage: + :: + + result = await api.list_rpn_v2_group_logs( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/logs", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2GroupLogsResponse(res.json()) + + async def list_rpn_v2_group_logs_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupLogsRequestOrderBy] = None, + group_id: int, + ) -> List[Log]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group logs per page. + :param order_by: Order of the rpn v2 group logs. + :param group_id: RPN V2 group ID. + :return: :class:`List[Log] ` + + Usage: + :: + + result = await api.list_rpn_v2_group_logs_all( + group_id=1, + ) + """ + + return await fetch_all_pages_async( + type=ListRpnV2GroupLogsResponse, + key="logs", + fetcher=self.list_rpn_v2_group_logs, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "group_id": group_id, + }, + ) + + async def update_rpn_v2_vlan_for_members( + self, + *, + group_id: int, + member_ids: List[int], + vlan: Optional[int] = None, + ) -> None: + """ + :param group_id: RPN V2 group ID. + :param member_ids: RPN V2 member IDs. + :param vlan: Min: 0. + Max: 3967. + + Usage: + :: + + result = await api.update_rpn_v2_vlan_for_members( + group_id=1, + member_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/vlan", + body=marshal_RpnV2ApiUpdateRpnV2VlanForMembersRequest( + RpnV2ApiUpdateRpnV2VlanForMembersRequest( + group_id=group_id, + member_ids=member_ids, + vlan=vlan, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def enable_rpn_v2_group_compatibility( + self, + *, + group_id: int, + rpnv1_group_id: int, + ) -> None: + """ + :param group_id: RPN V2 group ID. + :param rpnv1_group_id: RPN V1 group ID. + + Usage: + :: + + result = await api.enable_rpn_v2_group_compatibility( + group_id=1, + rpnv1_group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/enable-compatibility", + body=marshal_RpnV2ApiEnableRpnV2GroupCompatibilityRequest( + RpnV2ApiEnableRpnV2GroupCompatibilityRequest( + group_id=group_id, + rpnv1_group_id=rpnv1_group_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def disable_rpn_v2_group_compatibility( + self, + *, + group_id: int, + ) -> None: + """ + :param group_id: RPN V2 group ID. + + Usage: + :: + + result = await api.disable_rpn_v2_group_compatibility( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/disable-compatibility", + body={}, + ) + + self._throw_on_error(res) diff --git a/scaleway-async/scaleway_async/dedibox/v1/content.py b/scaleway-async/scaleway_async/dedibox/v1/content.py new file mode 100644 index 000000000..443067327 --- /dev/null +++ b/scaleway-async/scaleway_async/dedibox/v1/content.py @@ -0,0 +1,86 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
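+#
+# NOTE (editorial comment, not generator output): the *_TRANSIENT_STATUSES lists
+# below enumerate the in-progress states of each Dedibox resource. The
+# wait_for_* helpers poll a resource until its status leaves the matching list;
+# for example, wait_for_rpn_v2_group() stops once
+#     res.status not in RPN_V2_GROUP_TRANSIENT_STATUSES
+# evaluates to True.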
+from typing import List
+
+from .types import (
+    BMCAccessStatus,
+    IPv6BlockDelegationStatus,
+    RpnGroupMemberStatus,
+    RpnSanStatus,
+    RpnV2GroupStatus,
+    RpnV2MemberStatus,
+    ServerInstallStatus,
+    ServerStatus,
+    ServiceProvisioningStatus,
+)
+
+BMC_ACCESS_TRANSIENT_STATUSES: List[BMCAccessStatus] = [
+    BMCAccessStatus.CREATING,
+    BMCAccessStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`BMCAccessStatus`.
+"""
+I_PV6_BLOCK_DELEGATION_TRANSIENT_STATUSES: List[IPv6BlockDelegationStatus] = [
+    IPv6BlockDelegationStatus.UPDATING,
+]
+"""
+Lists transient statuses of the enum :class:`IPv6BlockDelegationStatus`.
+"""
+RPN_GROUP_MEMBER_TRANSIENT_STATUSES: List[RpnGroupMemberStatus] = [
+    RpnGroupMemberStatus.CREATING,
+    RpnGroupMemberStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`RpnGroupMemberStatus`.
+"""
+RPN_SAN_TRANSIENT_STATUSES: List[RpnSanStatus] = [
+    RpnSanStatus.CREATING,
+    RpnSanStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`RpnSanStatus`.
+"""
+RPN_V2_GROUP_TRANSIENT_STATUSES: List[RpnV2GroupStatus] = [
+    RpnV2GroupStatus.CREATING,
+    RpnV2GroupStatus.UPDATING,
+    RpnV2GroupStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`RpnV2GroupStatus`.
+"""
+RPN_V2_MEMBER_TRANSIENT_STATUSES: List[RpnV2MemberStatus] = [
+    RpnV2MemberStatus.CREATING,
+    RpnV2MemberStatus.UPDATING,
+    RpnV2MemberStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`RpnV2MemberStatus`.
+"""
+SERVER_INSTALL_TRANSIENT_STATUSES: List[ServerInstallStatus] = [
+    ServerInstallStatus.BOOTING,
+    ServerInstallStatus.SETTING_UP_RAID,
+    ServerInstallStatus.PARTITIONING,
+    ServerInstallStatus.FORMATTING,
+    ServerInstallStatus.INSTALLING,
+    ServerInstallStatus.CONFIGURING,
+    ServerInstallStatus.CONFIGURING_BOOTLOADER,
+    ServerInstallStatus.REBOOTING,
+]
+"""
+Lists transient statuses of the enum :class:`ServerInstallStatus`.
+"""
+SERVER_TRANSIENT_STATUSES: List[ServerStatus] = [
+    ServerStatus.DELIVERING,
+    ServerStatus.INSTALLING,
+]
+"""
+Lists transient statuses of the enum :class:`ServerStatus`.
+"""
+SERVICE_PROVISIONING_TRANSIENT_STATUSES: List[ServiceProvisioningStatus] = [
+    ServiceProvisioningStatus.DELIVERING,
+    ServiceProvisioningStatus.EXPIRING,
+]
+"""
+Lists transient statuses of the enum :class:`ServiceProvisioningStatus`.
+"""
diff --git a/scaleway-async/scaleway_async/dedibox/v1/marshalling.py b/scaleway-async/scaleway_async/dedibox/v1/marshalling.py
new file mode 100644
index 000000000..4452a9a73
--- /dev/null
+++ b/scaleway-async/scaleway_async/dedibox/v1/marshalling.py
@@ -0,0 +1,3523 @@
+# This file was automatically generated. DO NOT EDIT.
+# If you have any remark or suggestion do not hesitate to open an issue.
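+#
+# NOTE (editorial comment, not generator output): every unmarshal_<Type>() helper
+# below follows the same shape: verify the payload is a dict, copy each known key
+# into an `args` dict (nested objects are delegated to their own unmarshal_*
+# helper, and RFC 3339 timestamps are parsed with dateutil's parser.isoparse),
+# then return the typed object, e.g. `return IP(**args)`. The marshal_* helpers
+# used by the API methods build request bodies by performing the reverse mapping.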
+ +from typing import Any, Dict +from dateutil import parser + +from scaleway_core.profile import ProfileDefaults +from scaleway_core.bridge import ( + unmarshal_Money, +) +from .types import ( + OfferServerInfoStock, + PartitionFileSystem, + IP, + CPU, + Disk, + Memory, + PersistentMemory, + RaidController, + OfferAntiDosInfo, + OfferBackupInfo, + OfferBandwidthInfo, + OfferFailoverBlockInfo, + OfferFailoverIpInfo, + OfferLicenseInfo, + OfferRPNInfo, + OfferSANInfo, + OfferServerInfo, + OfferServiceLevelInfo, + OfferStorageInfo, + Offer, + OS, + RpnSan, + RpnGroup, + NetworkInterface, + ServerLocation, + ServerOption, + ServiceLevel, + Server, + RpnV2GroupSubnet, + RpnV2Group, + Service, + FailoverBlock, + FailoverIP, + BMCAccess, + Backup, + CanOrderResponse, + CreateFailoverIPsResponse, + GetIPv6BlockQuotasResponseQuota, + GetIPv6BlockQuotasResponse, + GetRemainingQuotaResponse, + GetRpnStatusResponse, + IPv6Block, + Invoice, + ListFailoverIPsResponse, + ListIPv6BlockSubnetsAvailableResponseSubnet, + ListIPv6BlockSubnetsAvailableResponse, + InvoiceSummary, + ListInvoicesResponse, + RpnSanIpRpnV2Group, + RpnSanIpServer, + RpnSanIp, + ListIpsResponse, + ListOSResponse, + ListOffersResponse, + RefundSummary, + ListRefundsResponse, + RpnSanServer, + ListRpnCapableSanServersResponse, + ListRpnCapableServersResponse, + RpnGroupMember, + ListRpnGroupMembersResponse, + ListRpnGroupsResponse, + ListRpnInvitesResponse, + RpnSanSummary, + ListRpnSansResponse, + RpnServerCapability, + ListRpnServerCapabilitiesResponse, + ListRpnV2CapableResourcesResponse, + RpnV2Member, + Log, + ListRpnV2GroupLogsResponse, + ListRpnV2GroupsResponse, + ListRpnV2MembersResponse, + ServerDisk, + ListServerDisksResponse, + ServerEvent, + ListServerEventsResponse, + ServerSummary, + ListServersResponse, + ListServicesResponse, + ListSubscribableServerOptionsResponse, + RaidArray, + Raid, + Refund, + Rescue, + Partition, + ServerDefaultPartitioning, + ServerInstall, + SubscribeStorageOptionsResponse, + AttachFailoverIPToMacAddressRequest, + AttachFailoverIPsRequest, + CreateFailoverIPsRequest, + CreateServerRequest, + DetachFailoverIPsRequest, + IPv6BlockApiCreateIPv6BlockRequest, + IPv6BlockApiCreateIPv6BlockSubnetRequest, + IPv6BlockApiUpdateIPv6BlockRequest, + InstallPartition, + InstallServerRequest, + RpnSanApiAddIpRequest, + RpnSanApiCreateRpnSanRequest, + RpnSanApiRemoveIpRequest, + RpnV1ApiAddRpnGroupMembersRequest, + RpnV1ApiCreateRpnGroupRequest, + RpnV1ApiDeleteRpnGroupMembersRequest, + RpnV1ApiLeaveRpnGroupRequest, + RpnV1ApiRpnGroupInviteRequest, + RpnV1ApiUpdateRpnGroupNameRequest, + RpnV2ApiAddRpnV2MembersRequest, + RpnV2ApiCreateRpnV2GroupRequest, + RpnV2ApiDeleteRpnV2MembersRequest, + RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + RpnV2ApiUpdateRpnV2GroupNameRequest, + RpnV2ApiUpdateRpnV2VlanForMembersRequest, + StartBMCAccessRequest, + StartRescueRequest, + SubscribeServerOptionRequest, + SubscribeStorageOptionsRequest, + UpdatableRaidArray, + UpdateRaidRequest, + UpdateReverseRequest, + UpdateServerBackupRequest, + UpdateServerRequest, + UpdateServerTagsRequest, +) + + +def unmarshal_IP(data: Any) -> IP: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'IP' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("ip_id", None) + if field is not None: + args["ip_id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("reverse", None) + if field is not None: + args["reverse"] = field + + field = data.get("version", None) + if field is not None: + args["version"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("netmask", None) + if field is not None: + args["netmask"] = field + + field = data.get("semantic", None) + if field is not None: + args["semantic"] = field + + field = data.get("gateway", None) + if field is not None: + args["gateway"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + return IP(**args) + + +def unmarshal_CPU(data: Any) -> CPU: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'CPU' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("core_count", None) + if field is not None: + args["core_count"] = field + + field = data.get("thread_count", None) + if field is not None: + args["thread_count"] = field + + field = data.get("frequency", None) + if field is not None: + args["frequency"] = field + + return CPU(**args) + + +def unmarshal_Disk(data: Any) -> Disk: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Disk' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + return Disk(**args) + + +def unmarshal_Memory(data: Any) -> Memory: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Memory' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("frequency", None) + if field is not None: + args["frequency"] = field + + field = data.get("is_ecc", None) + if field is not None: + args["is_ecc"] = field + + return Memory(**args) + + +def unmarshal_PersistentMemory(data: Any) -> PersistentMemory: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'PersistentMemory' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("frequency", None) + if field is not None: + args["frequency"] = field + + field = data.get("model", None) + if field is not None: + args["model"] = field + + return PersistentMemory(**args) + + +def unmarshal_RaidController(data: Any) -> RaidController: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RaidController' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("model", None) + if field is not None: + args["model"] = field + + field = data.get("raid_level", None) + if field is not None: + args["raid_level"] = field + + return RaidController(**args) + + +def unmarshal_OfferAntiDosInfo(data: Any) -> OfferAntiDosInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferAntiDosInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + return OfferAntiDosInfo(**args) + + +def unmarshal_OfferBackupInfo(data: Any) -> OfferBackupInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferBackupInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("size", None) + if field is not None: + args["size"] = field + + return OfferBackupInfo(**args) + + +def unmarshal_OfferBandwidthInfo(data: Any) -> OfferBandwidthInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferBandwidthInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return OfferBandwidthInfo(**args) + + +def unmarshal_OfferFailoverBlockInfo(data: Any) -> OfferFailoverBlockInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferFailoverBlockInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("onetime_fees", None) + if field is not None: + args["onetime_fees"] = unmarshal_Offer(field) + + return OfferFailoverBlockInfo(**args) + + +def unmarshal_OfferFailoverIpInfo(data: Any) -> OfferFailoverIpInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferFailoverIpInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("onetime_fees", None) + if field is not None: + args["onetime_fees"] = unmarshal_Offer(field) + + return OfferFailoverIpInfo(**args) + + +def unmarshal_OfferLicenseInfo(data: Any) -> OfferLicenseInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferLicenseInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("bound_to_ip", None) + if field is not None: + args["bound_to_ip"] = field + + return OfferLicenseInfo(**args) + + +def unmarshal_OfferRPNInfo(data: Any) -> OfferRPNInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferRPNInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return OfferRPNInfo(**args) + + +def unmarshal_OfferSANInfo(data: Any) -> OfferSANInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferSANInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("size", None) + if field is not None: + args["size"] = field + + field = data.get("ha", None) + if field is not None: + args["ha"] = field + + field = data.get("device_type", None) + if field is not None: + args["device_type"] = field + + return OfferSANInfo(**args) + + +def unmarshal_OfferServerInfo(data: Any) -> OfferServerInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferServerInfo' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("bandwidth", None) + if field is not None: + args["bandwidth"] = field + + field = data.get("stock", None) + if field is not None: + args["stock"] = field + + field = data.get("commercial_range", None) + if field is not None: + args["commercial_range"] = field + + field = data.get("disks", None) + if field is not None: + args["disks"] = ( + [unmarshal_Disk(v) for v in field] if field is not None else None + ) + + field = data.get("cpus", None) + if field is not None: + args["cpus"] = [unmarshal_CPU(v) for v in field] if field is not None else None + + field = data.get("memories", None) + if field is not None: + args["memories"] = ( + [unmarshal_Memory(v) for v in field] if field is not None else None + ) + + field = data.get("persistent_memories", None) + if field is not None: + args["persistent_memories"] = ( + [unmarshal_PersistentMemory(v) for v in field] + if field is not None + else None + ) + + field = data.get("raid_controllers", None) + if field is not None: + args["raid_controllers"] = ( + [unmarshal_RaidController(v) for v in field] if field is not None else None + ) + + field = data.get("available_options", None) + if field is not None: + args["available_options"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + field = data.get("connectivity", None) + if field is not None: + args["connectivity"] = field + + field = data.get("stock_by_datacenter", None) + if field is not None: + args["stock_by_datacenter"] = ( + {key: OfferServerInfoStock(value) for key, value in field.items()} + if field is not None + else None + ) + + field = data.get("rpn_version", None) + if field is not None: + args["rpn_version"] = field + + field = data.get("onetime_fees", None) + if field is not None: + args["onetime_fees"] = unmarshal_Offer(field) + + return OfferServerInfo(**args) + + +def unmarshal_OfferServiceLevelInfo(data: Any) -> OfferServiceLevelInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferServiceLevelInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("support_ticket", None) + if field is not None: + args["support_ticket"] = field + + field = data.get("support_phone", None) + if field is not None: + args["support_phone"] = field + + field = data.get("sales_support", None) + if field is not None: + args["sales_support"] = field + + field = data.get("git", None) + if field is not None: + args["git"] = field + + field = data.get("sla", None) + if field is not None: + args["sla"] = field + + field = data.get("priority_support", None) + if field is not None: + args["priority_support"] = field + + field = data.get("high_rpn_bandwidth", None) + if field is not None: + args["high_rpn_bandwidth"] = field + + field = data.get("customization", None) + if field is not None: + args["customization"] = field + + field = data.get("antidos", None) + if field is not None: + args["antidos"] = field + + field = data.get("extra_failover_quota", None) + if field is not None: + args["extra_failover_quota"] = field + + field = data.get("available_options", None) + if field is not None: + args["available_options"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + return OfferServiceLevelInfo(**args) + + +def unmarshal_OfferStorageInfo(data: Any) -> OfferStorageInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferStorageInfo' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("max_quota", None) + if field is not None: + args["max_quota"] = field + + field = data.get("size", None) + if field is not None: + args["size"] = field + + return OfferStorageInfo(**args) + + +def unmarshal_Offer(data: Any) -> Offer: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Offer' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("catalog", None) + if field is not None: + args["catalog"] = field + + field = data.get("payment_frequency", None) + if field is not None: + args["payment_frequency"] = field + + field = data.get("pricing", None) + if field is not None: + args["pricing"] = unmarshal_Money(field) + + field = data.get("server_info", None) + if field is not None: + args["server_info"] = unmarshal_OfferServerInfo(field) + + field = data.get("service_level_info", None) + if field is not None: + args["service_level_info"] = unmarshal_OfferServiceLevelInfo(field) + + field = data.get("rpn_info", None) + if field is not None: + args["rpn_info"] = unmarshal_OfferRPNInfo(field) + + field = data.get("san_info", None) + if field is not None: + args["san_info"] = unmarshal_OfferSANInfo(field) + + field = data.get("antidos_info", None) + if field is not None: + args["antidos_info"] = unmarshal_OfferAntiDosInfo(field) + + field = data.get("backup_info", None) + if field is not None: + args["backup_info"] = unmarshal_OfferBackupInfo(field) + + field = data.get("usb_storage_info", None) + if field is not None: + args["usb_storage_info"] = unmarshal_OfferStorageInfo(field) + + field = data.get("storage_info", None) + if field is not None: + args["storage_info"] = unmarshal_OfferStorageInfo(field) + + field = data.get("license_info", None) + if field is not None: + args["license_info"] = unmarshal_OfferLicenseInfo(field) + + field = data.get("failover_ip_info", None) + if field is not None: + args["failover_ip_info"] = unmarshal_OfferFailoverIpInfo(field) + + field = data.get("failover_block_info", None) + if field is not None: + args["failover_block_info"] = unmarshal_OfferFailoverBlockInfo(field) + + field = data.get("bandwidth_info", None) + if field is not None: + args["bandwidth_info"] = unmarshal_OfferBandwidthInfo(field) + + return Offer(**args) + + +def unmarshal_OS(data: Any) -> OS: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OS' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("version", None) + if field is not None: + args["version"] = field + + field = data.get("arch", None) + if field is not None: + args["arch"] = field + + field = data.get("allow_custom_partitioning", None) + if field is not None: + args["allow_custom_partitioning"] = field + + field = data.get("allow_ssh_keys", None) + if field is not None: + args["allow_ssh_keys"] = field + + field = data.get("requires_user", None) + if field is not None: + args["requires_user"] = field + + field = data.get("requires_admin_password", None) + if field is not None: + args["requires_admin_password"] = field + + field = data.get("requires_panel_password", None) + if field is not None: + args["requires_panel_password"] = field + + field = data.get("allowed_filesystems", None) + if field is not None: + args["allowed_filesystems"] = ( + [PartitionFileSystem(v) for v in field] if field is not None else None + ) + + field = data.get("requires_license", None) + if field is not None: + args["requires_license"] = field + + field = data.get("license_offers", None) + if field is not None: + args["license_offers"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + field = data.get("display_name", None) + if field is not None: + args["display_name"] = field + + field = data.get("password_regex", None) + if field is not None: + args["password_regex"] = field + + field = data.get("hostname_max_length", None) + if field is not None: + args["hostname_max_length"] = field + + field = data.get("max_partitions", None) + if field is not None: + args["max_partitions"] = field + + field = data.get("panel_password_regex", None) + if field is not None: + args["panel_password_regex"] = field + + field = data.get("requires_valid_hostname", None) + if field is not None: + args["requires_valid_hostname"] = field + + field = data.get("hostname_regex", None) + if field is not None: + args["hostname_regex"] = field + + field = data.get("released_at", None) + if field is not None: + args["released_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return OS(**args) + + +def unmarshal_RpnSan(data: Any) -> RpnSan: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSan' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("server_hostname", None) + if field is not None: + args["server_hostname"] = field + + field = data.get("iqn_suffix", None) + if field is not None: + args["iqn_suffix"] = field + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer_name", None) + if field is not None: + args["offer_name"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("storage_size", None) + if field is not None: + args["storage_size"] = field + + field = data.get("iqn", None) + if field is not None: + args["iqn"] = field + + field = data.get("rpnv1_compatible", None) + if field is not None: + args["rpnv1_compatible"] = field + + field = data.get("rpnv1_implicit", None) + if field is not None: + args["rpnv1_implicit"] = field + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("delivered_at", None) + if field is not None: + args["delivered_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("terminated_at", None) + if field is not None: + args["terminated_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return RpnSan(**args) + + +def unmarshal_RpnGroup(data: Any) -> RpnGroup: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnGroup' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("active", None) + if field is not None: + args["active"] = field + + field = data.get("owner", None) + if field is not None: + args["owner"] = field + + field = data.get("members_count", None) + if field is not None: + args["members_count"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return RpnGroup(**args) + + +def unmarshal_NetworkInterface(data: Any) -> NetworkInterface: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'NetworkInterface' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("card_id", None) + if field is not None: + args["card_id"] = field + + field = data.get("device_id", None) + if field is not None: + args["device_id"] = field + + field = data.get("mac", None) + if field is not None: + args["mac"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("ips", None) + if field is not None: + args["ips"] = [unmarshal_IP(v) for v in field] if field is not None else None + + return NetworkInterface(**args) + + +def unmarshal_ServerLocation(data: Any) -> ServerLocation: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerLocation' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("rack", None) + if field is not None: + args["rack"] = field + + field = data.get("room", None) + if field is not None: + args["room"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + return ServerLocation(**args) + + +def unmarshal_ServerOption(data: Any) -> ServerOption: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerOption' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("options", None) + if field is not None: + args["options"] = ( + [unmarshal_ServerOption(v) for v in field] if field is not None else None + ) + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("expired_at", None) + if field is not None: + args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return ServerOption(**args) + + +def unmarshal_ServiceLevel(data: Any) -> ServiceLevel: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServiceLevel' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("level", None) + if field is not None: + args["level"] = field + + return ServiceLevel(**args) + + +def unmarshal_Server(data: Any) -> Server: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Server' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("rebooted_at", None) + if field is not None: + args["rebooted_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("abuse_contact", None) + if field is not None: + args["abuse_contact"] = field + + field = data.get("interfaces", None) + if field is not None: + args["interfaces"] = ( + [unmarshal_NetworkInterface(v) for v in field] + if field is not None + else None + ) + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + field = data.get("options", None) + if field is not None: + args["options"] = ( + [unmarshal_ServerOption(v) for v in field] if field is not None else None + ) + + field = data.get("has_bmc", None) + if field is not None: + args["has_bmc"] = field + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + + field = data.get("is_outsourced", None) + if field is not None: + args["is_outsourced"] = field + + field = data.get("ipv6_slaac", None) + if field is not None: + args["ipv6_slaac"] = field + + field = data.get("qinq", None) + if field is not None: + args["qinq"] = field + + field = data.get("is_rpnv2_member", None) + if field is not None: + args["is_rpnv2_member"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("expired_at", None) + if field is not None: + args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("location", None) + if field is not None: + args["location"] = unmarshal_ServerLocation(field) + + field = data.get("os", None) + if field is not None: + args["os"] = unmarshal_OS(field) + + field = data.get("level", None) + if field is not None: + args["level"] = unmarshal_ServiceLevel(field) + + field = data.get("rescue_os", None) + if field is not None: + args["rescue_os"] = unmarshal_OS(field) + + return Server(**args) + + +def unmarshal_RpnV2GroupSubnet(data: Any) -> RpnV2GroupSubnet: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnV2GroupSubnet' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + return RpnV2GroupSubnet(**args) + + +def unmarshal_RpnV2Group(data: Any) -> RpnV2Group: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnV2Group' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("compatible_rpnv1", None) + if field is not None: + args["compatible_rpnv1"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("owner", None) + if field is not None: + args["owner"] = field + + field = data.get("members_count", None) + if field is not None: + args["members_count"] = field + + field = data.get("gateway", None) + if field is not None: + args["gateway"] = field + + field = data.get("subnet", None) + if field is not None: + args["subnet"] = unmarshal_RpnV2GroupSubnet(field) + + field = data.get("rpnv1_group", None) + if field is not None: + args["rpnv1_group"] = unmarshal_RpnGroup(field) + + return RpnV2Group(**args) + + +def unmarshal_Service(data: Any) -> Service: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Service' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("provisioning_status", None) + if field is not None: + args["provisioning_status"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("resource_id", None) + if field is not None: + args["resource_id"] = field + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("delivered_at", None) + if field is not None: + args["delivered_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("terminated_at", None) + if field is not None: + args["terminated_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return Service(**args) + + +def unmarshal_FailoverBlock(data: Any) -> FailoverBlock: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'FailoverBlock' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("nameservers", None) + if field is not None: + args["nameservers"] = field + + field = data.get("ip_version", None) + if field is not None: + args["ip_version"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("netmask", None) + if field is not None: + args["netmask"] = field + + field = data.get("gateway_ip", None) + if field is not None: + args["gateway_ip"] = field + + return FailoverBlock(**args) + + +def unmarshal_FailoverIP(data: Any) -> FailoverIP: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'FailoverIP' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("reverse", None) + if field is not None: + args["reverse"] = field + + field = data.get("ip_version", None) + if field is not None: + args["ip_version"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("netmask", None) + if field is not None: + args["netmask"] = field + + field = data.get("gateway_ip", None) + if field is not None: + args["gateway_ip"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("mac", None) + if field is not None: + args["mac"] = field + + field = data.get("server_id", None) + if field is not None: + args["server_id"] = field + + field = data.get("block", None) + if field is not None: + args["block"] = unmarshal_FailoverBlock(field) + + field = data.get("server_zone", None) + if field is not None: + args["server_zone"] = field + + return FailoverIP(**args) + + +def unmarshal_BMCAccess(data: Any) -> BMCAccess: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'BMCAccess' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("url", None) + if field is not None: + args["url"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("password", None) + if field is not None: + args["password"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return BMCAccess(**args) + + +def unmarshal_Backup(data: Any) -> Backup: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Backup' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("server", None) + if field is not None: + args["server"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("acl_enabled", None) + if field is not None: + args["acl_enabled"] = field + + field = data.get("autologin", None) + if field is not None: + args["autologin"] = field + + field = data.get("quota_space", None) + if field is not None: + args["quota_space"] = field + + field = data.get("quota_space_used", None) + if field is not None: + args["quota_space_used"] = field + + field = data.get("quota_files", None) + if field is not None: + args["quota_files"] = field + + field = data.get("quota_files_used", None) + if field is not None: + args["quota_files_used"] = field + + return Backup(**args) + + +def unmarshal_CanOrderResponse(data: Any) -> CanOrderResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'CanOrderResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("can_order", None) + if field is not None: + args["can_order"] = field + + field = data.get("quota_ok", None) + if field is not None: + args["quota_ok"] = field + + field = data.get("phone_confirmed", None) + if field is not None: + args["phone_confirmed"] = field + + field = data.get("email_confirmed", None) + if field is not None: + args["email_confirmed"] = field + + field = data.get("user_confirmed", None) + if field is not None: + args["user_confirmed"] = field + + field = data.get("payment_mode", None) + if field is not None: + args["payment_mode"] = field + + field = data.get("billing_ok", None) + if field is not None: + args["billing_ok"] = field + + field = data.get("message", None) + if field is not None: + args["message"] = field + + return CanOrderResponse(**args) + + +def unmarshal_CreateFailoverIPsResponse(data: Any) -> CreateFailoverIPsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'CreateFailoverIPsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("services", None) + if field is not None: + args["services"] = ( + [unmarshal_Service(v) for v in field] if field is not None else None + ) + + return CreateFailoverIPsResponse(**args) + + +def unmarshal_GetIPv6BlockQuotasResponseQuota( + data: Any, +) -> GetIPv6BlockQuotasResponseQuota: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetIPv6BlockQuotasResponseQuota' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("quota", None) + if field is not None: + args["quota"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + return GetIPv6BlockQuotasResponseQuota(**args) + + +def unmarshal_GetIPv6BlockQuotasResponse(data: Any) -> GetIPv6BlockQuotasResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetIPv6BlockQuotasResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("quotas", None) + if field is not None: + args["quotas"] = ( + [unmarshal_GetIPv6BlockQuotasResponseQuota(v) for v in field] + if field is not None + else None + ) + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + return GetIPv6BlockQuotasResponse(**args) + + +def unmarshal_GetRemainingQuotaResponse(data: Any) -> GetRemainingQuotaResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetRemainingQuotaResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("failover_ip_quota", None) + if field is not None: + args["failover_ip_quota"] = field + + field = data.get("failover_ip_remaining_quota", None) + if field is not None: + args["failover_ip_remaining_quota"] = field + + field = data.get("failover_block_quota", None) + if field is not None: + args["failover_block_quota"] = field + + field = data.get("failover_block_remaining_quota", None) + if field is not None: + args["failover_block_remaining_quota"] = field + + return GetRemainingQuotaResponse(**args) + + +def unmarshal_GetRpnStatusResponse(data: Any) -> GetRpnStatusResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetRpnStatusResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("operations_left", None) + if field is not None: + args["operations_left"] = field + + return GetRpnStatusResponse(**args) + + +def unmarshal_IPv6Block(data: Any) -> IPv6Block: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'IPv6Block' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("duid", None) + if field is not None: + args["duid"] = field + + field = data.get("nameservers", None) + if field is not None: + args["nameservers"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("subnets", None) + if field is not None: + args["subnets"] = ( + [unmarshal_IPv6Block(v) for v in field] if field is not None else None + ) + + field = data.get("delegation_status", None) + if field is not None: + args["delegation_status"] = field + + return IPv6Block(**args) + + +def unmarshal_Invoice(data: Any) -> Invoice: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Invoice' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("payment_method", None) + if field is not None: + args["payment_method"] = field + + field = data.get("content", None) + if field is not None: + args["content"] = field + + field = data.get("transaction_id", None) + if field is not None: + args["transaction_id"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("paid_at", None) + if field is not None: + args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return Invoice(**args) + + +def unmarshal_ListFailoverIPsResponse(data: Any) -> ListFailoverIPsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListFailoverIPsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("failover_ips", None) + if field is not None: + args["failover_ips"] = ( + [unmarshal_FailoverIP(v) for v in field] if field is not None else None + ) + + return ListFailoverIPsResponse(**args) + + +def unmarshal_ListIPv6BlockSubnetsAvailableResponseSubnet( + data: Any, +) -> ListIPv6BlockSubnetsAvailableResponseSubnet: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListIPv6BlockSubnetsAvailableResponseSubnet' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + return ListIPv6BlockSubnetsAvailableResponseSubnet(**args) + + +def unmarshal_ListIPv6BlockSubnetsAvailableResponse( + data: Any, +) -> ListIPv6BlockSubnetsAvailableResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListIPv6BlockSubnetsAvailableResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("subnet_availables", None) + if field is not None: + args["subnet_availables"] = ( + [unmarshal_ListIPv6BlockSubnetsAvailableResponseSubnet(v) for v in field] + if field is not None + else None + ) + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + return ListIPv6BlockSubnetsAvailableResponse(**args) + + +def unmarshal_InvoiceSummary(data: Any) -> InvoiceSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'InvoiceSummary' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("payment_method", None) + if field is not None: + args["payment_method"] = field + + field = data.get("transaction_id", None) + if field is not None: + args["transaction_id"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("paid_at", None) + if field is not None: + args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return InvoiceSummary(**args) + + +def unmarshal_ListInvoicesResponse(data: Any) -> ListInvoicesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListInvoicesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("invoices", None) + if field is not None: + args["invoices"] = ( + [unmarshal_InvoiceSummary(v) for v in field] if field is not None else None + ) + + return ListInvoicesResponse(**args) + + +def unmarshal_RpnSanIpRpnV2Group(data: Any) -> RpnSanIpRpnV2Group: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanIpRpnV2Group' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + return RpnSanIpRpnV2Group(**args) + + +def unmarshal_RpnSanIpServer(data: Any) -> RpnSanIpServer: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanIpServer' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + return RpnSanIpServer(**args) + + +def unmarshal_RpnSanIp(data: Any) -> RpnSanIp: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanIp' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("server", None) + if field is not None: + args["server"] = unmarshal_RpnSanIpServer(field) + + field = data.get("rpnv2_group", None) + if field is not None: + args["rpnv2_group"] = unmarshal_RpnSanIpRpnV2Group(field) + + field = data.get("ip", None) + if field is not None: + args["ip"] = unmarshal_IP(field) + + return RpnSanIp(**args) + + +def unmarshal_ListIpsResponse(data: Any) -> ListIpsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListIpsResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("ips", None) + if field is not None: + args["ips"] = ( + [unmarshal_RpnSanIp(v) for v in field] if field is not None else None + ) + + return ListIpsResponse(**args) + + +def unmarshal_ListOSResponse(data: Any) -> ListOSResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListOSResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("os", None) + if field is not None: + args["os"] = [unmarshal_OS(v) for v in field] if field is not None else None + + return ListOSResponse(**args) + + +def unmarshal_ListOffersResponse(data: Any) -> ListOffersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListOffersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("offers", None) + if field is not None: + args["offers"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + return ListOffersResponse(**args) + + +def unmarshal_RefundSummary(data: Any) -> RefundSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RefundSummary' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("method", None) + if field is not None: + args["method"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("refunded_at", None) + if field is not None: + args["refunded_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return RefundSummary(**args) + + +def unmarshal_ListRefundsResponse(data: Any) -> ListRefundsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRefundsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("refunds", None) + if field is not None: + args["refunds"] = ( + [unmarshal_RefundSummary(v) for v in field] if field is not None else None + ) + + return ListRefundsResponse(**args) + + +def unmarshal_RpnSanServer(data: Any) -> RpnSanServer: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanServer' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("sans", None) + if field is not None: + args["sans"] = ( + [unmarshal_RpnSan(v) for v in field] if field is not None else None + ) + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + return RpnSanServer(**args) + + +def unmarshal_ListRpnCapableSanServersResponse( + data: Any, +) -> ListRpnCapableSanServersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnCapableSanServersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("san_servers", None) + if field is not None: + args["san_servers"] = ( + [unmarshal_RpnSanServer(v) for v in field] if field is not None else None + ) + + return ListRpnCapableSanServersResponse(**args) + + +def unmarshal_ListRpnCapableServersResponse(data: Any) -> ListRpnCapableServersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnCapableServersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_Server(v) for v in field] if field is not None else None + ) + + return ListRpnCapableServersResponse(**args) + + +def unmarshal_RpnGroupMember(data: Any) -> RpnGroupMember: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnGroupMember' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("group_id", None) + if field is not None: + args["group_id"] = field + + field = data.get("group_name", None) + if field is not None: + args["group_name"] = field + + field = data.get("group_owner", None) + if field is not None: + args["group_owner"] = field + + field = data.get("owner", None) + if field is not None: + args["owner"] = field + + field = data.get("san_server", None) + if field is not None: + args["san_server"] = unmarshal_RpnSanServer(field) + + field = data.get("server", None) + if field is not None: + args["server"] = unmarshal_Server(field) + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return RpnGroupMember(**args) + + +def unmarshal_ListRpnGroupMembersResponse(data: Any) -> ListRpnGroupMembersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnGroupMembersResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("members", None) + if field is not None: + args["members"] = ( + [unmarshal_RpnGroupMember(v) for v in field] if field is not None else None + ) + + return ListRpnGroupMembersResponse(**args) + + +def unmarshal_ListRpnGroupsResponse(data: Any) -> ListRpnGroupsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnGroupsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("rpn_groups", None) + if field is not None: + args["rpn_groups"] = ( + [unmarshal_RpnGroup(v) for v in field] if field is not None else None + ) + + return ListRpnGroupsResponse(**args) + + +def unmarshal_ListRpnInvitesResponse(data: Any) -> ListRpnInvitesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnInvitesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("members", None) + if field is not None: + args["members"] = ( + [unmarshal_RpnGroupMember(v) for v in field] if field is not None else None + ) + + return ListRpnInvitesResponse(**args) + + +def unmarshal_RpnSanSummary(data: Any) -> RpnSanSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanSummary' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("server_hostname", None) + if field is not None: + args["server_hostname"] = field + + field = data.get("iqn_suffix", None) + if field is not None: + args["iqn_suffix"] = field + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer_name", None) + if field is not None: + args["offer_name"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("storage_size", None) + if field is not None: + args["storage_size"] = field + + field = data.get("rpnv1_compatible", None) + if field is not None: + args["rpnv1_compatible"] = field + + field = data.get("rpnv1_implicit", None) + if field is not None: + args["rpnv1_implicit"] = field + + field = data.get("delivered_at", None) + if field is not None: + args["delivered_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("terminated_at", None) + if field is not None: + args["terminated_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return RpnSanSummary(**args) + + +def unmarshal_ListRpnSansResponse(data: Any) -> 
ListRpnSansResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnSansResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("rpn_sans", None) + if field is not None: + args["rpn_sans"] = ( + [unmarshal_RpnSanSummary(v) for v in field] if field is not None else None + ) + + return ListRpnSansResponse(**args) + + +def unmarshal_RpnServerCapability(data: Any) -> RpnServerCapability: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnServerCapability' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + field = data.get("compatible_qinq", None) + if field is not None: + args["compatible_qinq"] = field + + field = data.get("can_join_qinq_group", None) + if field is not None: + args["can_join_qinq_group"] = field + + field = data.get("rpnv1_group_count", None) + if field is not None: + args["rpnv1_group_count"] = field + + field = data.get("rpnv2_group_count", None) + if field is not None: + args["rpnv2_group_count"] = field + + field = data.get("can_join_rpnv2_group", None) + if field is not None: + args["can_join_rpnv2_group"] = field + + field = data.get("ip_address", None) + if field is not None: + args["ip_address"] = field + + field = data.get("rpn_version", None) + if field is not None: + args["rpn_version"] = field + + return RpnServerCapability(**args) + + +def unmarshal_ListRpnServerCapabilitiesResponse( + data: Any, +) -> ListRpnServerCapabilitiesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnServerCapabilitiesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_RpnServerCapability(v) for v in field] + if field is not None + else None + ) + + return ListRpnServerCapabilitiesResponse(**args) + + +def unmarshal_ListRpnV2CapableResourcesResponse( + data: Any, +) -> ListRpnV2CapableResourcesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2CapableResourcesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_Server(v) for v in field] if field is not None else None + ) + + return ListRpnV2CapableResourcesResponse(**args) + + +def unmarshal_RpnV2Member(data: Any) -> RpnV2Member: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnV2Member' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("vlan", None) + if field is not None: + args["vlan"] = field + + field = data.get("server", None) + if field is not None: + args["server"] = unmarshal_Server(field) + + field = data.get("rpnv1_group", None) + if field is not None: + args["rpnv1_group"] = unmarshal_RpnGroup(field) + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return RpnV2Member(**args) + + +def unmarshal_Log(data: Any) -> Log: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Log' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("action", None) + if field is not None: + args["action"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("group", None) + if field is not None: + args["group"] = unmarshal_RpnV2Group(field) + + field = data.get("member", None) + if field is not None: + args["member"] = unmarshal_RpnV2Member(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("finished_at", None) + if field is not None: + args["finished_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return Log(**args) + + +def unmarshal_ListRpnV2GroupLogsResponse(data: Any) -> ListRpnV2GroupLogsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2GroupLogsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("logs", None) + if field is not None: + args["logs"] = [unmarshal_Log(v) for v in field] if field is not None else None + + return ListRpnV2GroupLogsResponse(**args) + + +def unmarshal_ListRpnV2GroupsResponse(data: Any) -> ListRpnV2GroupsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2GroupsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("rpn_groups", None) + if field is not None: + args["rpn_groups"] = ( + [unmarshal_RpnV2Group(v) for v in field] if field is not None else None + ) + + return ListRpnV2GroupsResponse(**args) + + +def unmarshal_ListRpnV2MembersResponse(data: Any) -> ListRpnV2MembersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2MembersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("members", None) + if field is not None: + args["members"] = ( + [unmarshal_RpnV2Member(v) for v in field] if field is not None else None + ) + + return ListRpnV2MembersResponse(**args) + + +def unmarshal_ServerDisk(data: Any) -> ServerDisk: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerDisk' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("connector", None) + if field is not None: + args["connector"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("is_addon", None) + if field is not None: + args["is_addon"] = field + + return ServerDisk(**args) + + +def unmarshal_ListServerDisksResponse(data: Any) -> ListServerDisksResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServerDisksResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("disks", None) + if field is not None: + args["disks"] = ( + [unmarshal_ServerDisk(v) for v in field] if field is not None else None + ) + + return ListServerDisksResponse(**args) + + +def unmarshal_ServerEvent(data: Any) -> ServerEvent: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerEvent' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("event_id", None) + if field is not None: + args["event_id"] = field + + field = data.get("description", None) + if field is not None: + args["description"] = field + + field = data.get("date", None) + if field is not None: + args["date"] = parser.isoparse(field) if isinstance(field, str) else field + + return ServerEvent(**args) + + +def unmarshal_ListServerEventsResponse(data: Any) -> ListServerEventsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServerEventsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("events", None) + if field is not None: + args["events"] = ( + [unmarshal_ServerEvent(v) for v in field] if field is not None else None + ) + + return ListServerEventsResponse(**args) + + +def unmarshal_ServerSummary(data: Any) -> ServerSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerSummary' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("expired_at", None) + if field is not None: + args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("offer_name", None) + if field is not None: + args["offer_name"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("interfaces", None) + if field is not None: + args["interfaces"] = ( + [unmarshal_NetworkInterface(v) for v in field] + if field is not None + else None + ) + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + field = data.get("is_outsourced", None) + if field is not None: + args["is_outsourced"] = field + + field = data.get("qinq", None) + if field is not None: + args["qinq"] = field + + field = data.get("os_id", None) + if field is not None: + args["os_id"] = field + + field = data.get("level", None) + if field is not None: + args["level"] = unmarshal_ServiceLevel(field) + + field = data.get("rpn_version", None) + if field is not None: + args["rpn_version"] = field + + return ServerSummary(**args) + + +def unmarshal_ListServersResponse(data: Any) -> ListServersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_ServerSummary(v) for v in field] if field is not None else None + ) + + return ListServersResponse(**args) + + +def unmarshal_ListServicesResponse(data: Any) -> ListServicesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServicesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("services", None) + if field is not None: + args["services"] = ( + [unmarshal_Service(v) for v in field] if field is not None else None + ) + + return ListServicesResponse(**args) + + +def unmarshal_ListSubscribableServerOptionsResponse( + data: Any, +) -> ListSubscribableServerOptionsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListSubscribableServerOptionsResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("server_options", None) + if field is not None: + args["server_options"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + return ListSubscribableServerOptionsResponse(**args) + + +def unmarshal_RaidArray(data: Any) -> RaidArray: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RaidArray' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("raid_level", None) + if field is not None: + args["raid_level"] = field + + field = data.get("disks", None) + if field is not None: + args["disks"] = ( + [unmarshal_ServerDisk(v) for v in field] if field is not None else None + ) + + return RaidArray(**args) + + +def unmarshal_Raid(data: Any) -> Raid: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Raid' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("raid_arrays", None) + if field is not None: + args["raid_arrays"] = ( + [unmarshal_RaidArray(v) for v in field] if field is not None else None + ) + + return Raid(**args) + + +def unmarshal_Refund(data: Any) -> Refund: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Refund' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("method", None) + if field is not None: + args["method"] = field + + field = data.get("content", None) + if field is not None: + args["content"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("refunded_at", None) + if field is not None: + args["refunded_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return Refund(**args) + + +def unmarshal_Rescue(data: Any) -> Rescue: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Rescue' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("os_id", None) + if field is not None: + args["os_id"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("password", None) + if field is not None: + args["password"] = field + + field = data.get("protocol", None) + if field is not None: + args["protocol"] = field + + return Rescue(**args) + + +def unmarshal_Partition(data: Any) -> Partition: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Partition' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("file_system", None) + if field is not None: + args["file_system"] = field + + field = data.get("raid_level", None) + if field is not None: + args["raid_level"] = field + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("connectors", None) + if field is not None: + args["connectors"] = field + + field = data.get("mount_point", None) + if field is not None: + args["mount_point"] = field + + return Partition(**args) + + +def unmarshal_ServerDefaultPartitioning(data: Any) -> ServerDefaultPartitioning: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerDefaultPartitioning' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("partitions", None) + if field is not None: + args["partitions"] = ( + [unmarshal_Partition(v) for v in field] if field is not None else None + ) + + return ServerDefaultPartitioning(**args) + + +def unmarshal_ServerInstall(data: Any) -> ServerInstall: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerInstall' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("os_id", None) + if field is not None: + args["os_id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("partitions", None) + if field is not None: + args["partitions"] = ( + [unmarshal_Partition(v) for v in field] if field is not None else None + ) + + field = data.get("ssh_key_ids", None) + if field is not None: + args["ssh_key_ids"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("user_login", None) + if field is not None: + args["user_login"] = field + + field = data.get("panel_url", None) + if field is not None: + args["panel_url"] = field + + return ServerInstall(**args) + + +def unmarshal_SubscribeStorageOptionsResponse( + data: Any, +) -> SubscribeStorageOptionsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'SubscribeStorageOptionsResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("services", None) + if field is not None: + args["services"] = ( + [unmarshal_Service(v) for v in field] if field is not None else None + ) + + return SubscribeStorageOptionsResponse(**args) + + +def marshal_AttachFailoverIPToMacAddressRequest( + request: AttachFailoverIPToMacAddressRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.type_ is not None: + output["type"] = str(request.type_) + + if request.mac is not None: + output["mac"] = request.mac + + return output + + +def marshal_AttachFailoverIPsRequest( + request: AttachFailoverIPsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.server_id is not None: + output["server_id"] = request.server_id + + if request.fips_ids is not None: + output["fips_ids"] = request.fips_ids + + return output + + +def marshal_CreateFailoverIPsRequest( + request: CreateFailoverIPsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.offer_id is not None: + output["offer_id"] = request.offer_id + + if request.quantity is not None: + output["quantity"] = request.quantity + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_CreateServerRequest( + request: CreateServerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.offer_id is not None: + output["offer_id"] = request.offer_id + + if request.server_option_ids is not None: + output["server_option_ids"] = request.server_option_ids + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.datacenter_name is not None: + output["datacenter_name"] = request.datacenter_name + + return output + + +def marshal_DetachFailoverIPsRequest( + request: DetachFailoverIPsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.fips_ids is not None: + output["fips_ids"] = request.fips_ids + + return output + + +def marshal_IPv6BlockApiCreateIPv6BlockRequest( + request: IPv6BlockApiCreateIPv6BlockRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id + + return output + + +def marshal_IPv6BlockApiCreateIPv6BlockSubnetRequest( + request: IPv6BlockApiCreateIPv6BlockSubnetRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.address is not None: + output["address"] = request.address + + if request.cidr is not None: + output["cidr"] = request.cidr + + return output + + +def marshal_IPv6BlockApiUpdateIPv6BlockRequest( + request: IPv6BlockApiUpdateIPv6BlockRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.nameservers is not None: + output["nameservers"] = request.nameservers + + return output + + +def marshal_InstallPartition( + request: InstallPartition, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.file_system is not None: + output["file_system"] = str(request.file_system) + + if request.raid_level is not None: + output["raid_level"] = str(request.raid_level) + + if request.capacity is not None: + output["capacity"] = request.capacity + + if request.connectors is not None: + 
output["connectors"] = request.connectors + + if request.mount_point is not None: + output["mount_point"] = request.mount_point + + return output + + +def marshal_InstallServerRequest( + request: InstallServerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.os_id is not None: + output["os_id"] = request.os_id + + if request.hostname is not None: + output["hostname"] = request.hostname + + if request.user_login is not None: + output["user_login"] = request.user_login + + if request.user_password is not None: + output["user_password"] = request.user_password + + if request.panel_password is not None: + output["panel_password"] = request.panel_password + + if request.root_password is not None: + output["root_password"] = request.root_password + + if request.partitions is not None: + output["partitions"] = [ + marshal_InstallPartition(item, defaults) for item in request.partitions + ] + + if request.ssh_key_ids is not None: + output["ssh_key_ids"] = request.ssh_key_ids + + if request.license_offer_id is not None: + output["license_offer_id"] = request.license_offer_id + + if request.ip_id is not None: + output["ip_id"] = request.ip_id + + return output + + +def marshal_RpnSanApiAddIpRequest( + request: RpnSanApiAddIpRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.ip_ids is not None: + output["ip_ids"] = request.ip_ids + + return output + + +def marshal_RpnSanApiCreateRpnSanRequest( + request: RpnSanApiCreateRpnSanRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.offer_id is not None: + output["offer_id"] = request.offer_id + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnSanApiRemoveIpRequest( + request: RpnSanApiRemoveIpRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.ip_ids is not None: + output["ip_ids"] = request.ip_ids + + return output + + +def marshal_RpnV1ApiAddRpnGroupMembersRequest( + request: RpnV1ApiAddRpnGroupMembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.server_ids is not None: + output["server_ids"] = request.server_ids + + if request.san_server_ids is not None: + output["san_server_ids"] = request.san_server_ids + + return output + + +def marshal_RpnV1ApiCreateRpnGroupRequest( + request: RpnV1ApiCreateRpnGroupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.server_ids is not None: + output["server_ids"] = request.server_ids + + if request.san_server_ids is not None: + output["san_server_ids"] = request.san_server_ids + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnV1ApiDeleteRpnGroupMembersRequest( + request: RpnV1ApiDeleteRpnGroupMembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids + + return output + + +def marshal_RpnV1ApiLeaveRpnGroupRequest( + request: RpnV1ApiLeaveRpnGroupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids 
+ + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnV1ApiRpnGroupInviteRequest( + request: RpnV1ApiRpnGroupInviteRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.server_ids is not None: + output["server_ids"] = request.server_ids + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnV1ApiUpdateRpnGroupNameRequest( + request: RpnV1ApiUpdateRpnGroupNameRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + return output + + +def marshal_RpnV2ApiAddRpnV2MembersRequest( + request: RpnV2ApiAddRpnV2MembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.servers is not None: + output["servers"] = request.servers + + return output + + +def marshal_RpnV2ApiCreateRpnV2GroupRequest( + request: RpnV2ApiCreateRpnV2GroupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.servers is not None: + output["servers"] = request.servers + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.type_ is not None: + output["type"] = str(request.type_) + + return output + + +def marshal_RpnV2ApiDeleteRpnV2MembersRequest( + request: RpnV2ApiDeleteRpnV2MembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids + + return output + + +def marshal_RpnV2ApiEnableRpnV2GroupCompatibilityRequest( + request: RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.rpnv1_group_id is not None: + output["rpnv1_group_id"] = request.rpnv1_group_id + + return output + + +def marshal_RpnV2ApiUpdateRpnV2GroupNameRequest( + request: RpnV2ApiUpdateRpnV2GroupNameRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + return output + + +def marshal_RpnV2ApiUpdateRpnV2VlanForMembersRequest( + request: RpnV2ApiUpdateRpnV2VlanForMembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids + + if request.vlan is not None: + output["vlan"] = request.vlan + + return output + + +def marshal_StartBMCAccessRequest( + request: StartBMCAccessRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.ip is not None: + output["ip"] = request.ip + + return output + + +def marshal_StartRescueRequest( + request: StartRescueRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.os_id is not None: + output["os_id"] = request.os_id + + return output + + +def marshal_SubscribeServerOptionRequest( + request: SubscribeServerOptionRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.option_id is not None: + output["option_id"] = request.option_id + + return output + + +def 
marshal_SubscribeStorageOptionsRequest( + request: SubscribeStorageOptionsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.options_ids is not None: + output["options_ids"] = request.options_ids + + return output + + +def marshal_UpdatableRaidArray( + request: UpdatableRaidArray, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.raid_level is not None: + output["raid_level"] = str(request.raid_level) + + if request.disk_ids is not None: + output["disk_ids"] = request.disk_ids + + return output + + +def marshal_UpdateRaidRequest( + request: UpdateRaidRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.raid_arrays is not None: + output["raid_arrays"] = [ + marshal_UpdatableRaidArray(item, defaults) for item in request.raid_arrays + ] + + return output + + +def marshal_UpdateReverseRequest( + request: UpdateReverseRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.reverse is not None: + output["reverse"] = request.reverse + + return output + + +def marshal_UpdateServerBackupRequest( + request: UpdateServerBackupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.password is not None: + output["password"] = request.password + + if request.autologin is not None: + output["autologin"] = request.autologin + + if request.acl_enabled is not None: + output["acl_enabled"] = request.acl_enabled + + return output + + +def marshal_UpdateServerRequest( + request: UpdateServerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.hostname is not None: + output["hostname"] = request.hostname + + if request.enable_ipv6 is not None: + output["enable_ipv6"] = request.enable_ipv6 + + return output + + +def marshal_UpdateServerTagsRequest( + request: UpdateServerTagsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.tags is not None: + output["tags"] = request.tags + + return output diff --git a/scaleway-async/scaleway_async/dedibox/v1/types.py b/scaleway-async/scaleway_async/dedibox/v1/types.py new file mode 100644 index 000000000..0c058935e --- /dev/null +++ b/scaleway-async/scaleway_async/dedibox/v1/types.py @@ -0,0 +1,4452 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +from typing import Dict, List, Optional + +from scaleway_core.bridge import ( + Money, + Zone, +) +from scaleway_core.utils import ( + StrEnumMeta, +) + + +class AttachFailoverIPToMacAddressRequestMacType(str, Enum, metaclass=StrEnumMeta): + MAC_TYPE_UNKNOWN = "mac_type_unknown" + VMWARE = "vmware" + KVM = "kvm" + XEN = "xen" + + def __str__(self) -> str: + return str(self.value) + + +class BMCAccessStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + CREATING = "creating" + CREATED = "created" + DELETING = "deleting" + + def __str__(self) -> str: + return str(self.value) + + +class BackupStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_BACKUP_STATUS = "unknown_backup_status" + UNINITIALIZED = "uninitialized" + INACTIVE = "inactive" + READY = "ready" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverBlockVersion(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_VERSION = "unknown_version" + IPV4 = "ipv4" + IPV6 = "ipv6" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverIPInterfaceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + NORMAL = "normal" + IPMI = "ipmi" + VIRTUAL = "virtual" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverIPStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + READY = "ready" + BUSY = "busy" + LOCKED = "locked" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverIPVersion(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_VERSION = "unknown_version" + IPV4 = "ipv4" + IPV6 = "ipv6" + + def __str__(self) -> str: + return str(self.value) + + +class GetRpnStatusResponseStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + BUSY = "busy" + OPERATIONAL = "operational" + + def __str__(self) -> str: + return str(self.value) + + +class IPSemantic(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + PROXAD = "proxad" + EXT = "ext" + PUBLIC = "public" + PRIVATE = "private" + IPMI = "ipmi" + ADM = "adm" + REDIRECT = "redirect" + MIGRATION = "migration" + + def __str__(self) -> str: + return str(self.value) + + +class IPStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + READY = "ready" + BUSY = "busy" + LOCKED = "locked" + + def __str__(self) -> str: + return str(self.value) + + +class IPVersion(str, Enum, metaclass=StrEnumMeta): + IPV4 = "ipv4" + IPV6 = "ipv6" + + def __str__(self) -> str: + return str(self.value) + + +class IPv6BlockDelegationStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + UPDATING = "updating" + DONE = "done" + + def __str__(self) -> str: + return str(self.value) + + +class InvoicePaymentMethod(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_PAYMENT_METHOD = "unknown_payment_method" + CREDIT_CARD = "credit_card" + AMEX = "amex" + PAYPAL = "paypal" + TRANSFER = "transfer" + DIRECT_DEBIT = "direct_debit" + + def __str__(self) -> str: + return str(self.value) + + +class InvoiceStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_INVOICE_STATUS = "unknown_invoice_status" + UNPAID = "unpaid" + PAID = "paid" + ERRORED = "errored" + + def __str__(self) -> str: + return str(self.value) + + +class ListFailoverIPsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + IP_ASC = "ip_asc" + IP_DESC = "ip_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListInvoicesRequestOrderBy(str, Enum, 
metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListOSRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + RELEASED_AT_ASC = "released_at_asc" + RELEASED_AT_DESC = "released_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListOffersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + PRICE_ASC = "price_asc" + PRICE_DESC = "price_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRefundsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnCapableSanServersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnCapableServersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnGroupMembersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnGroupsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnInvitesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnSansRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnServerCapabilitiesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2CapableResourcesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2GroupLogsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2GroupsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2MembersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2MembersRequestType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + RPNV1_GROUP = "rpnv1_group" + SERVER = "server" + + def __str__(self) -> str: + return str(self.value) + + +class ListServerDisksRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + 
CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListServerEventsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListServersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListServicesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class LogAction(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_LOG_ACTION = "unknown_log_action" + GROUP_CREATED = "group_created" + GROUP_DELETED = "group_deleted" + MEMBERS_ADDED = "members_added" + MEMBERS_DELETED = "members_deleted" + DESCRIPTION_UPDATED = "description_updated" + RPNV1_MEMBERS_ADDED = "rpnv1_members_added" + RPNV1_MEMBERS_DELETED = "rpnv1_members_deleted" + VLAN_UPDATED = "vlan_updated" + VLAN_UPDATED_ON_ALL_SERVERS = "vlan_updated_on_all_servers" + + def __str__(self) -> str: + return str(self.value) + + +class LogStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_LOG_STATUS = "unknown_log_status" + SUCCESS = "success" + IN_PROGRESS = "in_progress" + ERROR = "error" + + def __str__(self) -> str: + return str(self.value) + + +class MemoryType(str, Enum, metaclass=StrEnumMeta): + DDR2 = "ddr2" + DDR3 = "ddr3" + DDR4 = "ddr4" + + def __str__(self) -> str: + return str(self.value) + + +class NetworkInterfaceInterfaceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + NORMAL = "normal" + IPMI = "ipmi" + VIRTUAL = "virtual" + + def __str__(self) -> str: + return str(self.value) + + +class OSArch(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_ARCH = "unknown_arch" + AMD64 = "amd64" + X86 = "x86" + ARM = "arm" + ARM64 = "arm64" + + def __str__(self) -> str: + return str(self.value) + + +class OSType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + SERVER = "server" + VIRTU = "virtu" + PANEL = "panel" + DESKTOP = "desktop" + CUSTOM = "custom" + RESCUE = "rescue" + + def __str__(self) -> str: + return str(self.value) + + +class OfferAntiDosInfoType(str, Enum, metaclass=StrEnumMeta): + MINIMAL = "minimal" + PREVENTIVE = "preventive" + CURATIVE = "curative" + + def __str__(self) -> str: + return str(self.value) + + +class OfferCatalog(str, Enum, metaclass=StrEnumMeta): + ALL = "all" + DEFAULT = "default" + BETA = "beta" + RESELLER = "reseller" + PREMIUM = "premium" + VOLUME = "volume" + ADMIN = "admin" + INACTIVE = "inactive" + + def __str__(self) -> str: + return str(self.value) + + +class OfferPaymentFrequency(str, Enum, metaclass=StrEnumMeta): + MONTHLY = "monthly" + ONESHOT = "oneshot" + + def __str__(self) -> str: + return str(self.value) + + +class OfferSANInfoType(str, Enum, metaclass=StrEnumMeta): + HDD = "hdd" + SSD = "ssd" + + def __str__(self) -> str: + return str(self.value) + + +class OfferServerInfoStock(str, Enum, metaclass=StrEnumMeta): + EMPTY = "empty" + LOW = "low" + AVAILABLE = "available" + + def __str__(self) -> str: + return str(self.value) + + +class PartitionFileSystem(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + EFI = "efi" + SWAP = "swap" + EXT4 = "ext4" + EXT3 = "ext3" + EXT2 = "ext2" + XFS = "xfs" + NTFS = "ntfs" + FAT32 = "fat32" + UFS = "ufs" + + def __str__(self) -> str: + return str(self.value) + + +class 
PartitionType(str, Enum, metaclass=StrEnumMeta): + PRIMARY = "primary" + EXTENDED = "extended" + LOGICAL = "logical" + + def __str__(self) -> str: + return str(self.value) + + +class RaidArrayRaidLevel(str, Enum, metaclass=StrEnumMeta): + NO_RAID = "no_raid" + RAID0 = "raid0" + RAID1 = "raid1" + RAID5 = "raid5" + RAID6 = "raid6" + RAID10 = "raid10" + + def __str__(self) -> str: + return str(self.value) + + +class RefundMethod(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_REFUND_METHOD = "unknown_refund_method" + CREDIT_CARD = "credit_card" + AMEX = "amex" + PAYPAL = "paypal" + TRANSFER = "transfer" + + def __str__(self) -> str: + return str(self.value) + + +class RefundStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_REFUND_STATUS = "unknown_refund_status" + UNPAID = "unpaid" + PAID = "paid" + ERRORED = "errored" + + def __str__(self) -> str: + return str(self.value) + + +class RescueProtocol(str, Enum, metaclass=StrEnumMeta): + VNC = "vnc" + SSH = "ssh" + + def __str__(self) -> str: + return str(self.value) + + +class RpnGroupMemberStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_RPN_MEMBER_STATUS = "unknown_rpn_member_status" + PENDING_INVITATION = "pending_invitation" + ACTIVE = "active" + CREATING = "creating" + DELETING = "deleting" + DELETED = "deleted" + + def __str__(self) -> str: + return str(self.value) + + +class RpnGroupType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + LOCAL = "local" + SHARED = "shared" + + def __str__(self) -> str: + return str(self.value) + + +class RpnSanIpType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + SERVER_IP = "server_ip" + RPNV2_SUBNET = "rpnv2_subnet" + + def __str__(self) -> str: + return str(self.value) + + +class RpnSanStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + CREATING = "creating" + ACTIVE = "active" + DELETING = "deleting" + MAINTENANCE = "maintenance" + + def __str__(self) -> str: + return str(self.value) + + +class RpnV2GroupStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_GROUP_STATUS = "unknown_group_status" + CREATING = "creating" + ACTIVE = "active" + UPDATING = "updating" + DELETING = "deleting" + + def __str__(self) -> str: + return str(self.value) + + +class RpnV2GroupType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + STANDARD = "standard" + QINQ = "qinq" + + def __str__(self) -> str: + return str(self.value) + + +class RpnV2MemberStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_MEMBER_STATUS = "unknown_member_status" + CREATING = "creating" + ACTIVE = "active" + UPDATING = "updating" + DELETING = "deleting" + + def __str__(self) -> str: + return str(self.value) + + +class ServerDiskType(str, Enum, metaclass=StrEnumMeta): + SATA = "sata" + SSD = "ssd" + SAS = "sas" + SSHD = "sshd" + USB = "usb" + NVME = "nvme" + + def __str__(self) -> str: + return str(self.value) + + +class ServerInstallStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + BOOTING = "booting" + SETTING_UP_RAID = "setting_up_raid" + PARTITIONING = "partitioning" + FORMATTING = "formatting" + INSTALLING = "installing" + CONFIGURING = "configuring" + CONFIGURING_BOOTLOADER = "configuring_bootloader" + REBOOTING = "rebooting" + INSTALLED = "installed" + + def __str__(self) -> str: + return str(self.value) + + +class ServerStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + DELIVERING = "delivering" + INSTALLING = "installing" + READY = "ready" + STOPPED = "stopped" + ERROR = "error" + LOCKED = "locked" + RESCUE = "rescue" + BUSY = "busy" + + def 
__str__(self) -> str: + return str(self.value) + + +class ServiceLevelLevel(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + BASIC = "basic" + BUSINESS = "business" + + def __str__(self) -> str: + return str(self.value) + + +class ServiceProvisioningStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + DELIVERING = "delivering" + READY = "ready" + ERROR = "error" + EXPIRING = "expiring" + EXPIRED = "expired" + + def __str__(self) -> str: + return str(self.value) + + +class ServiceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + SERVICE = "service" + ORDER = "order" + + def __str__(self) -> str: + return str(self.value) + + +@dataclass +class OfferAntiDosInfo: + type_: OfferAntiDosInfoType + + +@dataclass +class OfferBackupInfo: + size: int + + +@dataclass +class OfferBandwidthInfo: + speed: int + + +@dataclass +class OfferLicenseInfo: + bound_to_ip: bool + + +@dataclass +class OfferRPNInfo: + speed: int + + +@dataclass +class OfferSANInfo: + size: int + """ + SAN size (in bytes). + """ + + ha: bool + """ + High availability offer. + """ + + device_type: OfferSANInfoType + """ + Type of SAN device (hdd / ssd). + """ + + +@dataclass +class OfferStorageInfo: + max_quota: int + + size: int + + +@dataclass +class IP: + ip_id: str + """ + ID of the IP. + """ + + address: str + """ + Address of the IP. + """ + + reverse: str + """ + Reverse IP value. + """ + + version: IPVersion + """ + Version of IP (v4 or v6). + """ + + cidr: int + """ + Classless InterDomain Routing notation of the IP. + """ + + netmask: str + """ + Network mask of IP. + """ + + semantic: IPSemantic + """ + Semantic of IP. + """ + + gateway: str + """ + Gateway of IP. + """ + + status: IPStatus + """ + Status of the IP. + """ + + +@dataclass +class Offer: + id: int + """ + ID of the offer. + """ + + name: str + """ + Name of the offer. + """ + + catalog: OfferCatalog + """ + Catalog of the offer. + """ + + payment_frequency: OfferPaymentFrequency + """ + Payment frequency of the offer. + """ + + pricing: Optional[Money] + """ + Price of the offer. + """ + + server_info: Optional[OfferServerInfo] + + service_level_info: Optional[OfferServiceLevelInfo] + + rpn_info: Optional[OfferRPNInfo] + + san_info: Optional[OfferSANInfo] + + antidos_info: Optional[OfferAntiDosInfo] + + backup_info: Optional[OfferBackupInfo] + + usb_storage_info: Optional[OfferStorageInfo] + + storage_info: Optional[OfferStorageInfo] + + license_info: Optional[OfferLicenseInfo] + + failover_ip_info: Optional[OfferFailoverIpInfo] + + failover_block_info: Optional[OfferFailoverBlockInfo] + + bandwidth_info: Optional[OfferBandwidthInfo] + + +@dataclass +class NetworkInterface: + card_id: int + """ + Card ID of the network interface. + """ + + device_id: int + """ + Device ID of the network interface. + """ + + mac: str + """ + MAC address of the network interface. + """ + + type_: NetworkInterfaceInterfaceType + """ + Network interface type. + """ + + ips: List[IP] + """ + IPs of the network interface. + """ + + +@dataclass +class OS: + id: int + """ + ID of the OS. + """ + + name: str + """ + Name of the OS. + """ + + type_: OSType + """ + Type of the OS. + """ + + version: str + """ + Version of the OS. + """ + + arch: OSArch + """ + Architecture of the OS. + """ + + allow_custom_partitioning: bool + """ + True if the OS allows custom partitioning. + """ + + allow_ssh_keys: bool + """ + True if the OS allows SSH keys. + """ + + requires_user: bool + """ + True if the OS requires a user.
+ """ + + requires_admin_password: bool + """ + True if the OS requires admin password. + """ + + requires_panel_password: bool + """ + True if the OS requires panel password. + """ + + allowed_filesystems: List[PartitionFileSystem] + """ + True if the OS allow file systems. + """ + + requires_license: bool + """ + True if the OS requires license. + """ + + license_offers: List[Offer] + """ + License offers available with the OS. + """ + + display_name: str + """ + Display name of the OS. + """ + + password_regex: str + """ + Regex used to validate the installation passwords. + """ + + hostname_max_length: int + """ + Hostname max length. + """ + + max_partitions: Optional[int] + """ + Maximum number of partitions which can be created. + """ + + panel_password_regex: Optional[str] + """ + Regex used to validate the panel installation password. + """ + + requires_valid_hostname: Optional[bool] + """ + If both requires_valid_hostname & hostname_regex are set, it means that at least one of the criterias must be valid. + """ + + hostname_regex: Optional[str] + """ + If both requires_valid_hostname & hostname_regex are set, it means that at least one of the criterias must be valid. + """ + + released_at: Optional[datetime] + """ + OS release date. + """ + + +@dataclass +class ServerLocation: + rack: str + + room: str + + datacenter_name: str + + +@dataclass +class ServerOption: + options: List[ServerOption] + + offer: Optional[Offer] + + created_at: Optional[datetime] + + updated_at: Optional[datetime] + + expired_at: Optional[datetime] + + +@dataclass +class ServiceLevel: + offer_id: int + """ + Offer ID of service level. + """ + + level: ServiceLevelLevel + """ + Level type of service level. + """ + + +@dataclass +class RpnSan: + id: int + """ + RPN SAN ID. + """ + + datacenter_name: str + """ + Datacenter location. + """ + + organization_id: str + """ + Organization ID. + """ + + project_id: str + """ + Project ID. + """ + + server_hostname: str + """ + RPN SAN server hostname. + """ + + iqn_suffix: str + """ + IQN suffix. + """ + + offer_id: int + """ + Offer ID. + """ + + created_at: Optional[datetime] + """ + Date of creation of the RPN SAN. + """ + + offer_name: str + """ + Offer description. + """ + + status: RpnSanStatus + """ + Status. + """ + + storage_size: int + """ + RPN SAN storage size. + """ + + iqn: str + + rpnv1_compatible: bool + """ + True if the SAN is compatible with the RPNv1 technology. + """ + + rpnv1_implicit: bool + """ + True if the offer supports the RPNv1 implicitly, false if it must to be added to a group to support RPNv1. + """ + + offer: Optional[Offer] + + delivered_at: Optional[datetime] + """ + RPN SAN delivery date. + """ + + terminated_at: Optional[datetime] + """ + RPN SAN termination date. + """ + + expires_at: Optional[datetime] + """ + RPN SAN expiration date. + """ + + +@dataclass +class RpnGroup: + id: int + """ + Rpn group member ID. + """ + + name: str + """ + Rpn group name. + """ + + type_: RpnGroupType + """ + Rpn group type (local or shared). + """ + + active: bool + """ + Whether the group is active or not. + """ + + owner: str + """ + RPN group owner. + """ + + members_count: int + """ + Total number of members. + """ + + organization_id: str + """ + Rpn group organization ID. + """ + + project_id: str + """ + Rpn group project ID. + """ + + created_at: Optional[datetime] + """ + Rpn group creation date. 
+ """ + + +@dataclass +class RpnV2GroupSubnet: + address: str + + cidr: int + + +@dataclass +class Server: + id: int + """ + ID of the server. + """ + + organization_id: str + """ + Organization ID the server is attached to. + """ + + project_id: str + """ + Project ID the server is attached to. + """ + + hostname: str + """ + Hostname of the server. + """ + + rebooted_at: Optional[datetime] + """ + Date of last reboot of the server. + """ + + status: ServerStatus + """ + Status of the server. + """ + + abuse_contact: str + """ + Abuse contact of the server. + """ + + interfaces: List[NetworkInterface] + """ + Network interfaces of the server. + """ + + zone: Zone + """ + The zone in which is the server. + """ + + options: List[ServerOption] + """ + Options subscribe on the server. + """ + + has_bmc: bool + """ + Boolean if the server has a BMC. + """ + + tags: List[str] + """ + Array of customs tags attached to the server. + """ + + is_outsourced: bool + """ + Whether the server is outsourced or not. + """ + + ipv6_slaac: bool + """ + Whether or not you can enable/disable the IPv6. + """ + + qinq: bool + """ + Whether the server is compatible with QinQ. + """ + + is_rpnv2_member: bool + """ + Whether or not the server is already part of an rpnv2 group. + """ + + created_at: Optional[datetime] + """ + Date of creation of the server. + """ + + updated_at: Optional[datetime] + """ + Date of last modification of the server. + """ + + expired_at: Optional[datetime] + """ + Date of release of the server. + """ + + offer: Optional[Offer] + """ + Offer of the server. + """ + + location: Optional[ServerLocation] + """ + Location of the server. + """ + + os: Optional[OS] + """ + OS installed on the server. + """ + + level: Optional[ServiceLevel] + """ + Service level of the server. + """ + + rescue_os: Optional[OS] + """ + Rescue OS of the server. + """ + + +@dataclass +class FailoverBlock: + id: int + """ + ID of the failover block. + """ + + address: str + """ + IP of the failover block. + """ + + nameservers: List[str] + """ + Name servers. + """ + + ip_version: FailoverBlockVersion + """ + IP version of the failover block. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the failover block. + """ + + netmask: str + """ + Netmask of the failover block. + """ + + gateway_ip: str + """ + Gateway IP of the failover block. + """ + + +@dataclass +class RpnSanIpRpnV2Group: + id: int + + name: str + + +@dataclass +class RpnSanIpServer: + id: int + + hostname: str + + datacenter_name: str + + +@dataclass +class RpnSanServer: + id: int + """ + The RPN SAN server ID. + """ + + datacenter_name: str + """ + The RPN SAN server datacenter name. + """ + + hostname: str + """ + The RPN SAN server hostname. + """ + + sans: List[RpnSan] + """ + RPN SANs linked to the RPN SAN server. + """ + + zone: Zone + """ + The RPN SAN server zone. + """ + + +@dataclass +class RpnV2Group: + id: int + """ + RPN V2 group ID. + """ + + name: str + """ + RPN V2 group name. + """ + + compatible_rpnv1: bool + """ + Whether or not the RPN V1 compatibility was enabled. + """ + + organization_id: str + """ + Organization ID of the RPN V2 group. + """ + + project_id: str + """ + Project ID of the RPN V2 group. + """ + + type_: RpnV2GroupType + """ + RPN V2 group type (qing / standard). + """ + + status: RpnV2GroupStatus + """ + RPN V2 group status. + """ + + owner: str + """ + RPN V2 group owner. + """ + + members_count: int + """ + Total number of members. + """ + + gateway: str + """ + RPN V2 gateway. 
+ """ + + subnet: Optional[RpnV2GroupSubnet] + """ + RPN V2 subnet. + """ + + rpnv1_group: Optional[RpnGroup] + """ + The RPNv1 group (if the compatibility was enabled). + """ + + +@dataclass +class RpnV2Member: + id: int + """ + RPN V2 member ID. + """ + + status: RpnV2MemberStatus + """ + RPN V2 member status. + """ + + vlan: str + """ + RPN V2 member VLAN. + """ + + speed: Optional[int] + """ + RPN speed. + """ + + server: Optional[Server] + + rpnv1_group: Optional[RpnGroup] + + +@dataclass +class ServerDisk: + id: int + + connector: str + + type_: ServerDiskType + + capacity: int + + is_addon: bool + + +@dataclass +class Service: + id: int + """ + ID of the service. + """ + + provisioning_status: ServiceProvisioningStatus + """ + Provisioning status of the service. + """ + + type_: ServiceType + """ + Service type, either order or service. + """ + + resource_id: Optional[int] + """ + Resource ID of the service. + """ + + offer: Optional[Offer] + """ + Offer of the service. + """ + + created_at: Optional[datetime] + """ + Creation date of the service. + """ + + delivered_at: Optional[datetime] + """ + Delivery date of the service. + """ + + terminated_at: Optional[datetime] + """ + Terminatation date of the service. + """ + + expires_at: Optional[datetime] + """ + Expiration date of the service. + """ + + +@dataclass +class GetIPv6BlockQuotasResponseQuota: + quota: int + + cidr: int + + +@dataclass +class InstallPartition: + file_system: PartitionFileSystem + """ + File system of the installation partition. + """ + + raid_level: RaidArrayRaidLevel + """ + RAID level of the installation partition. + """ + + capacity: int + """ + Capacity of the installation partition. + """ + + connectors: List[str] + """ + Connectors of the installation partition. + """ + + mount_point: Optional[str] + """ + Mount point of the installation partition. + """ + + +@dataclass +class FailoverIP: + id: int + """ + ID of the failover IP. + """ + + address: str + """ + IP of the failover IP. + """ + + reverse: str + """ + Reverse IP value. + """ + + ip_version: FailoverIPVersion + """ + IP version of the failover IP. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the failover IP. + """ + + netmask: str + """ + Netmask of the failover IP. + """ + + gateway_ip: str + """ + Gateway IP of the failover IP. + """ + + status: FailoverIPStatus + """ + Status of the IP failover. + """ + + type_: FailoverIPInterfaceType + """ + The interface type. + """ + + mac: Optional[str] + """ + MAC address of the IP failover. + """ + + server_id: Optional[int] + """ + Server ID linked to the IP failover. + """ + + block: Optional[FailoverBlock] + """ + Block of the IP failover. + """ + + server_zone: Optional[str] + """ + The server zone (if assigned). + """ + + +@dataclass +class ListIPv6BlockSubnetsAvailableResponseSubnet: + address: str + + cidr: int + + +@dataclass +class InvoiceSummary: + id: int + + status: InvoiceStatus + + payment_method: InvoicePaymentMethod + + transaction_id: int + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + paid_at: Optional[datetime] + + +@dataclass +class RpnSanIp: + type_: RpnSanIpType + """ + IP type (server | rpnv2_subnet). + """ + + ip: Optional[IP] + """ + An IP object. 
+ """ + + server: Optional[RpnSanIpServer] + + rpnv2_group: Optional[RpnSanIpRpnV2Group] + + +@dataclass +class RefundSummary: + id: int + + status: RefundStatus + + method: RefundMethod + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + refunded_at: Optional[datetime] + + +@dataclass +class RpnGroupMember: + id: int + """ + Rpn group member ID. + """ + + status: RpnGroupMemberStatus + """ + RPN group member status. + """ + + group_id: int + """ + RPN group ID. + """ + + group_name: str + """ + RPN group name. + """ + + group_owner: str + """ + RPN group owner. + """ + + owner: str + """ + RPN member owner. + """ + + san_server: Optional[RpnSanServer] + """ + Authorized RPN SAN server. + """ + + server: Optional[Server] + """ + Authorized rpn v1 capable server. + """ + + speed: Optional[int] + """ + RPN speed. + """ + + +@dataclass +class RpnSanSummary: + id: int + """ + RPN SAN ID. + """ + + datacenter_name: str + """ + Datacenter location. + """ + + organization_id: str + """ + Organization ID. + """ + + project_id: str + """ + Project ID. + """ + + server_hostname: str + """ + RPN SAN server hostname. + """ + + iqn_suffix: str + """ + IQN suffix. + """ + + offer_id: int + """ + Offer ID. + """ + + created_at: Optional[datetime] + """ + Date of creation of the RPN SAN. + """ + + offer_name: str + """ + Offer description. + """ + + status: RpnSanStatus + """ + Status. + """ + + storage_size: int + """ + RPN SAN storage size. + """ + + rpnv1_compatible: bool + """ + True if the SAN is compatible with the RPNv1 technology. + """ + + rpnv1_implicit: bool + """ + True if the offer supports the RPNv1 implicitly, false if it must to be added to a group to support RPNv1. + """ + + delivered_at: Optional[datetime] + """ + RPN SAN delivery date. + """ + + terminated_at: Optional[datetime] + """ + RPN SAN termination date. + """ + + expires_at: Optional[datetime] + """ + RPN SAN expiration date. + """ + + +@dataclass +class RpnServerCapability: + id: int + """ + Server ID. + """ + + hostname: str + """ + Server hostname. + """ + + datacenter_name: str + """ + Server datacenter name. + """ + + zone: Zone + """ + Server zone. + """ + + compatible_qinq: bool + """ + True if server is compatible with QinQ protocol (rpn v2). + """ + + can_join_qinq_group: bool + """ + True if server can join a QinQ group. + """ + + rpnv1_group_count: int + """ + Times server is linked in a rpnv1 group. + """ + + rpnv2_group_count: int + """ + Times server is linked in a rpnv2 group. + """ + + can_join_rpnv2_group: bool + """ + True if server can join an rpnv2 group. + """ + + ip_address: Optional[str] + """ + Private IP address (if rpn compatiblle). + """ + + rpn_version: Optional[int] + """ + Supported rpn version. + """ + + +@dataclass +class Log: + id: int + """ + RPN V2 log ID. + """ + + action: LogAction + """ + Which action was performed. + """ + + status: LogStatus + """ + Action status. + """ + + group: Optional[RpnV2Group] + """ + RPN V2 group. + """ + + member: Optional[RpnV2Member] + """ + RPN V2 member (if appliable). + """ + + created_at: Optional[datetime] + """ + Creation date. + """ + + finished_at: Optional[datetime] + """ + Completion date. + """ + + +@dataclass +class ServerEvent: + event_id: int + """ + ID of the event. + """ + + description: str + """ + Descriptiion of the event. + """ + + date: Optional[datetime] + """ + Date of the event. + """ + + +@dataclass +class ServerSummary: + id: int + """ + ID of the server. 
+ """ + + datacenter_name: str + """ + Datacenter of the server. + """ + + organization_id: str + """ + Organization ID the server is attached to. + """ + + project_id: str + """ + Project ID the server is attached to. + """ + + hostname: str + """ + Hostname of the server. + """ + + created_at: Optional[datetime] + """ + Date of creation of the server. + """ + + updated_at: Optional[datetime] + """ + Date of last modification of the server. + """ + + expired_at: Optional[datetime] + """ + Date of release of the server. + """ + + offer_id: int + """ + Offer ID of the server. + """ + + offer_name: str + """ + Offer name of the server. + """ + + status: ServerStatus + """ + Status of the server. + """ + + interfaces: List[NetworkInterface] + """ + Network interfaces of the server. + """ + + zone: Zone + """ + The zone in which is the server. + """ + + is_outsourced: bool + """ + Whether the server is outsourced or not. + """ + + qinq: bool + """ + Whether the server is compatible with QinQ. + """ + + os_id: Optional[int] + """ + OS ID installed on server. + """ + + level: Optional[ServiceLevel] + """ + Service level of the server. + """ + + rpn_version: Optional[int] + """ + Supported RPN version. + """ + + +@dataclass +class CPU: + name: str + """ + Name of CPU. + """ + + core_count: int + """ + Number of cores of the CPU. + """ + + thread_count: int + """ + Number of threads of the CPU. + """ + + frequency: int + """ + Frequency of the CPU. + """ + + +@dataclass +class Disk: + capacity: int + """ + Capacity of the disk. + """ + + type_: ServerDiskType + """ + Type of the disk. + """ + + +@dataclass +class Memory: + capacity: int + """ + Capacity of the memory. + """ + + type_: MemoryType + """ + Type of the memory. + """ + + frequency: int + """ + Frequency of the memory. + """ + + is_ecc: bool + """ + True if the memory is an error-correcting code memory. + """ + + +@dataclass +class PersistentMemory: + capacity: int + """ + Capacity of the persistent memory. + """ + + frequency: int + """ + Frequency of the persistent memory. + """ + + model: str + """ + Model of the persistent memory. + """ + + +@dataclass +class RaidController: + model: str + """ + Model of the RAID controller. + """ + + raid_level: List[str] + """ + RAID level of the RAID controller. + """ + + +@dataclass +class RaidArray: + raid_level: RaidArrayRaidLevel + """ + The RAID level. + """ + + disks: List[ServerDisk] + """ + Disks on the RAID controller. + """ + + +@dataclass +class Partition: + type_: PartitionType + """ + Type of the partition. + """ + + file_system: PartitionFileSystem + """ + File system of the partition. + """ + + raid_level: RaidArrayRaidLevel + """ + Raid level of the partition. + """ + + capacity: int + """ + Capacity of the partition. + """ + + connectors: List[str] + """ + Connectors of the partition. + """ + + mount_point: Optional[str] + """ + Mount point of the partition. + """ + + +@dataclass +class UpdatableRaidArray: + raid_level: RaidArrayRaidLevel + """ + The RAID level. + """ + + disk_ids: List[int] + """ + The list of Disk ID of the updatable RAID. + """ + + +@dataclass +class AttachFailoverIPToMacAddressRequest: + ip_id: int + """ + ID of the failover IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + type_: Optional[AttachFailoverIPToMacAddressRequestMacType] + """ + A mac type. + """ + + mac: Optional[str] + """ + A valid mac address (existing or not). 
+ """ + + +@dataclass +class AttachFailoverIPsRequest: + server_id: int + """ + ID of the server. + """ + + fips_ids: List[int] + """ + List of ID of failovers IP to attach. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class BMCAccess: + url: str + """ + URL to access to the server console. + """ + + login: str + """ + The login to use for the BMC (Baseboard Management Controller) access authentification. + """ + + password: str + """ + The password to use for the BMC (Baseboard Management Controller) access authentification. + """ + + status: BMCAccessStatus + """ + Status of the connection. + """ + + expires_at: Optional[datetime] + """ + The date after which the BMC (Baseboard Management Controller) access will be closed. + """ + + +@dataclass +class Backup: + id: int + """ + ID of the backup. + """ + + login: str + """ + Login of the backup. + """ + + server: str + """ + Server of the backup. + """ + + status: BackupStatus + """ + Status of the backup. + """ + + acl_enabled: bool + """ + ACL enable boolean of the backup. + """ + + autologin: bool + """ + Autologin boolean of the backup. + """ + + quota_space: int + """ + Total quota space of the backup. + """ + + quota_space_used: int + """ + Quota space used of the backup. + """ + + quota_files: int + """ + Total quota files of the backup. + """ + + quota_files_used: int + """ + Quota files used of the backup. + """ + + +@dataclass +class BillingApiCanOrderRequest: + project_id: Optional[str] + + +@dataclass +class BillingApiDownloadInvoiceRequest: + invoice_id: int + + +@dataclass +class BillingApiDownloadRefundRequest: + refund_id: int + + +@dataclass +class BillingApiGetInvoiceRequest: + invoice_id: int + + +@dataclass +class BillingApiGetRefundRequest: + refund_id: int + + +@dataclass +class BillingApiListInvoicesRequest: + page: Optional[int] + + page_size: Optional[int] + + order_by: Optional[ListInvoicesRequestOrderBy] + + project_id: Optional[str] + + +@dataclass +class BillingApiListRefundsRequest: + page: Optional[int] + + page_size: Optional[int] + + order_by: Optional[ListRefundsRequestOrderBy] + + project_id: Optional[str] + + +@dataclass +class CanOrderResponse: + can_order: bool + + quota_ok: bool + + phone_confirmed: bool + + email_confirmed: bool + + user_confirmed: bool + + payment_mode: bool + + billing_ok: bool + + message: Optional[str] + + +@dataclass +class CancelServerInstallRequest: + server_id: int + """ + Server ID of the server to cancel install. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class CreateFailoverIPsRequest: + offer_id: int + """ + Failover IP offer ID. + """ + + quantity: int + """ + Quantity. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class CreateFailoverIPsResponse: + total_count: int + + services: List[Service] + + +@dataclass +class CreateServerRequest: + offer_id: int + """ + Offer ID of the new server. + """ + + server_option_ids: List[int] + """ + Server option IDs of the new server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID of the new server. + """ + + datacenter_name: Optional[str] + """ + Datacenter name of the new server. 
+ """ + + +@dataclass +class DeleteFailoverIPRequest: + ip_id: int + """ + ID of the failover IP to delete. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DeleteServerRequest: + server_id: int + """ + Server ID to delete. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DeleteServiceRequest: + service_id: int + """ + ID of the service. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DetachFailoverIPFromMacAddressRequest: + ip_id: int + """ + ID of the failover IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DetachFailoverIPsRequest: + fips_ids: List[int] + """ + List of IDs of failovers IP to detach. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetBMCAccessRequest: + server_id: int + """ + ID of the server to get BMC access. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetFailoverIPRequest: + ip_id: int + """ + ID of the failover IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetIPv6BlockQuotasResponse: + quotas: List[GetIPv6BlockQuotasResponseQuota] + """ + Quota for each CIDR of IPv6 block. + """ + + total_count: int + """ + Total count of quotas. + """ + + +@dataclass +class GetOSRequest: + os_id: int + """ + ID of the OS. + """ + + server_id: int + """ + ID of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class GetOfferRequest: + offer_id: int + """ + ID of offer. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class GetOrderedServiceRequest: + ordered_service_id: int + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetRaidRequest: + server_id: int + """ + ID of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetRemainingQuotaRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class GetRemainingQuotaResponse: + failover_ip_quota: int + """ + Current failover IP quota. + """ + + failover_ip_remaining_quota: int + """ + Remaining failover IP quota. + """ + + failover_block_quota: int + """ + Current failover block quota. + """ + + failover_block_remaining_quota: int + """ + Remaining failover block quota. + """ + + +@dataclass +class GetRescueRequest: + server_id: int + """ + ID of the server to get rescue. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. 
+ """ + + +@dataclass +class GetRpnStatusResponse: + status: GetRpnStatusResponseStatus + """ + If status = 'operational', you can perform rpn actions in write. + """ + + operations_left: Optional[int] + """ + Number of operations left to perform before being operational. + """ + + +@dataclass +class GetServerBackupRequest: + server_id: int + """ + Server ID of the backup. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServerDefaultPartitioningRequest: + server_id: int + """ + ID of the server. + """ + + os_id: int + """ + OS ID of the default partitioning. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServerInstallRequest: + server_id: int + """ + Server ID of the server to install. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServerRequest: + server_id: int + """ + ID of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServiceRequest: + service_id: int + """ + ID of the service. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class IPv6Block: + id: int + """ + ID of the IPv6. + """ + + address: str + """ + Address of the IPv6. + """ + + duid: str + """ + DUID of the IPv6. + """ + + nameservers: List[str] + """ + DNS linked to the IPv6. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the IPv6. + """ + + subnets: List[IPv6Block] + """ + All IPv6 subnets. + """ + + delegation_status: IPv6BlockDelegationStatus + """ + The nameservers delegation status. + """ + + +@dataclass +class IPv6BlockApiCreateIPv6BlockRequest: + project_id: Optional[str] + """ + ID of the project. + """ + + +@dataclass +class IPv6BlockApiCreateIPv6BlockSubnetRequest: + block_id: int + """ + ID of the IPv6 block. + """ + + address: str + """ + Address of the IPv6. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the IPv6. + """ + + +@dataclass +class IPv6BlockApiDeleteIPv6BlockRequest: + block_id: int + """ + ID of the IPv6 block to delete. + """ + + +@dataclass +class IPv6BlockApiGetIPv6BlockQuotasRequest: + project_id: Optional[str] + """ + ID of the project. + """ + + +@dataclass +class IPv6BlockApiGetIPv6BlockRequest: + project_id: Optional[str] + """ + ID of the project. + """ + + +@dataclass +class IPv6BlockApiListIPv6BlockSubnetsAvailableRequest: + block_id: int + """ + ID of the IPv6 block. + """ + + +@dataclass +class IPv6BlockApiUpdateIPv6BlockRequest: + block_id: int + """ + ID of the IPv6 block. + """ + + nameservers: Optional[List[str]] + """ + DNS to link to the IPv6. + """ + + +@dataclass +class InstallServerRequest: + server_id: int + """ + Server ID to install. + """ + + os_id: int + """ + OS ID to install on the server. + """ + + hostname: str + """ + Hostname of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + user_login: Optional[str] + """ + User to install on the server. + """ + + user_password: Optional[str] + """ + User password to install on the server. + """ + + panel_password: Optional[str] + """ + Panel password to install on the server. 
+ """ + + root_password: Optional[str] + """ + Root password to install on the server. + """ + + partitions: Optional[List[InstallPartition]] + """ + Partitions to install on the server. + """ + + ssh_key_ids: Optional[List[str]] + """ + SSH key IDs authorized on the server. + """ + + license_offer_id: Optional[int] + """ + Offer ID of license to install on server. + """ + + ip_id: Optional[int] + """ + IP to link at the license to install on server. + """ + + +@dataclass +class Invoice: + id: int + + status: InvoiceStatus + + payment_method: InvoicePaymentMethod + + content: str + + transaction_id: int + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + paid_at: Optional[datetime] + + +@dataclass +class ListFailoverIPsRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of failovers IP per page. + """ + + order_by: Optional[ListFailoverIPsRequestOrderBy] + """ + Order of the failovers IP. + """ + + project_id: Optional[str] + """ + Filter failovers IP by project ID. + """ + + search: Optional[str] + """ + Filter failovers IP which matching with this field. + """ + + only_available: Optional[bool] + """ + True: return all failovers IP not attached on server +false: return all failovers IP attached on server. + """ + + +@dataclass +class ListFailoverIPsResponse: + total_count: int + """ + Total count of matching failovers IP. + """ + + failover_ips: List[FailoverIP] + """ + List of failover IPs that match filters. + """ + + +@dataclass +class ListIPv6BlockSubnetsAvailableResponse: + subnet_availables: List[ListIPv6BlockSubnetsAvailableResponseSubnet] + """ + All available address and CIDR available in subnet. + """ + + total_count: int + """ + Total count of available subnets. + """ + + +@dataclass +class ListInvoicesResponse: + total_count: int + + invoices: List[InvoiceSummary] + + +@dataclass +class ListIpsResponse: + total_count: int + """ + Total count of authorized IPs. + """ + + ips: List[RpnSanIp] + """ + List of authorized IPs. + """ + + +@dataclass +class ListOSRequest: + server_id: int + """ + Filter OS by compatible server ID. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of OS per page. + """ + + order_by: Optional[ListOSRequestOrderBy] + """ + Order of the OS. + """ + + type_: Optional[OSType] + """ + Type of the OS. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class ListOSResponse: + total_count: int + """ + Total count of matching OS. + """ + + os: List[OS] + """ + OS that match filters. + """ + + +@dataclass +class ListOffersRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of offer per page. + """ + + order_by: Optional[ListOffersRequestOrderBy] + """ + Order of the offers. + """ + + commercial_range: Optional[str] + """ + Filter on commercial range. + """ + + catalog: Optional[OfferCatalog] + """ + Filter on catalog. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + is_failover_ip: Optional[bool] + """ + Get the current failover IP offer. 
+ """ + + is_failover_block: Optional[bool] + """ + Get the current failover IP block offer. + """ + + sold_in: Optional[List[str]] + """ + Filter offers depending on their datacenter. + """ + + available_only: Optional[bool] + """ + Set this filter to true to only return available offers. + """ + + is_rpn_san: Optional[bool] + """ + Get the RPN SAN offers. + """ + + +@dataclass +class ListOffersResponse: + total_count: int + """ + Total count of matching offers. + """ + + offers: List[Offer] + """ + Offers that match filters. + """ + + +@dataclass +class ListRefundsResponse: + total_count: int + + refunds: List[RefundSummary] + + +@dataclass +class ListRpnCapableSanServersResponse: + total_count: int + """ + Total count of rpn capable san servers. + """ + + san_servers: List[RpnSanServer] + """ + List of san servers. + """ + + +@dataclass +class ListRpnCapableServersResponse: + total_count: int + """ + Total count of rpn capable servers. + """ + + servers: List[Server] + """ + List of servers. + """ + + +@dataclass +class ListRpnGroupMembersResponse: + total_count: int + """ + Total count of rpn v1 group members. + """ + + members: List[RpnGroupMember] + """ + List of rpn v1 group members. + """ + + +@dataclass +class ListRpnGroupsResponse: + total_count: int + """ + Total count of rpn groups. + """ + + rpn_groups: List[RpnGroup] + """ + List of rpn v1 groups. + """ + + +@dataclass +class ListRpnInvitesResponse: + total_count: int + """ + Total count of invites. + """ + + members: List[RpnGroupMember] + """ + List of invites. + """ + + +@dataclass +class ListRpnSansResponse: + total_count: int + """ + Total count of matching RPN SANs. + """ + + rpn_sans: List[RpnSanSummary] + """ + List of RPN SANs that match filters. + """ + + +@dataclass +class ListRpnServerCapabilitiesResponse: + total_count: int + """ + Total count of servers. + """ + + servers: List[RpnServerCapability] + """ + List of servers and their RPN capabilities. + """ + + +@dataclass +class ListRpnV2CapableResourcesResponse: + total_count: int + """ + Total count of matching rpn v2 capable resources. + """ + + servers: List[Server] + """ + List of rpn v2 capable resources that match filters. + """ + + +@dataclass +class ListRpnV2GroupLogsResponse: + total_count: int + """ + Total count of matching rpn v2 logs. + """ + + logs: List[Log] + """ + List of rpn v2 logs that match filters. + """ + + +@dataclass +class ListRpnV2GroupsResponse: + total_count: int + """ + Total count of matching rpn v2 groups. + """ + + rpn_groups: List[RpnV2Group] + """ + List of rpn v2 groups that match filters. + """ + + +@dataclass +class ListRpnV2MembersResponse: + total_count: int + """ + Total count of matching rpn v2 group members. + """ + + members: List[RpnV2Member] + """ + List of rpn v2 group members that match filters. + """ + + +@dataclass +class ListServerDisksRequest: + server_id: int + """ + Server ID of the server disks. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of server disk per page. + """ + + order_by: Optional[ListServerDisksRequestOrderBy] + """ + Order of the server disks. + """ + + +@dataclass +class ListServerDisksResponse: + total_count: int + """ + Total count of matching server disks. + """ + + disks: List[ServerDisk] + """ + Server disks that match filters. 
+ """ + + +@dataclass +class ListServerEventsRequest: + server_id: int + """ + Server ID of the server events. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of server event per page. + """ + + order_by: Optional[ListServerEventsRequestOrderBy] + """ + Order of the server events. + """ + + +@dataclass +class ListServerEventsResponse: + total_count: int + """ + Total count of matching server events. + """ + + events: List[ServerEvent] + """ + Server events that match filters. + """ + + +@dataclass +class ListServersRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of server per page. + """ + + order_by: Optional[ListServersRequestOrderBy] + """ + Order of the servers. + """ + + project_id: Optional[str] + """ + Filter servers by project ID. + """ + + search: Optional[str] + """ + Filter servers by hostname. + """ + + +@dataclass +class ListServersResponse: + total_count: int + """ + Total count of matching servers. + """ + + servers: List[ServerSummary] + """ + Servers that match filters. + """ + + +@dataclass +class ListServicesRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of service per page. + """ + + order_by: Optional[ListServicesRequestOrderBy] + """ + Order of the services. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class ListServicesResponse: + total_count: int + """ + Total count of matching services. + """ + + services: List[Service] + """ + Services that match filters. + """ + + +@dataclass +class ListSubscribableServerOptionsRequest: + server_id: int + """ + Server ID of the subscribable server options. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of subscribable server option per page. + """ + + +@dataclass +class ListSubscribableServerOptionsResponse: + total_count: int + """ + Total count of matching subscribable server options. + """ + + server_options: List[Offer] + """ + Server options that match filters. 
+ """ + + +@dataclass +class OfferFailoverBlockInfo: + onetime_fees: Optional[Offer] + + +@dataclass +class OfferFailoverIpInfo: + onetime_fees: Optional[Offer] + + +@dataclass +class OfferServerInfo: + bandwidth: int + + stock: OfferServerInfoStock + + commercial_range: str + + disks: List[Disk] + + cpus: List[CPU] + + memories: List[Memory] + + persistent_memories: List[PersistentMemory] + + raid_controllers: List[RaidController] + + available_options: List[Offer] + + connectivity: int + + stock_by_datacenter: Dict[str, OfferServerInfoStock] + + rpn_version: Optional[int] + + onetime_fees: Optional[Offer] + + +@dataclass +class OfferServiceLevelInfo: + support_ticket: bool + + support_phone: bool + + sales_support: bool + + git: str + + sla: float + + priority_support: bool + + high_rpn_bandwidth: bool + + customization: bool + + antidos: bool + + extra_failover_quota: int + + available_options: List[Offer] + + +@dataclass +class Raid: + raid_arrays: List[RaidArray] + """ + Details about the RAID controller. + """ + + +@dataclass +class RebootServerRequest: + server_id: int + """ + Server ID to reboot. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class Refund: + id: int + + status: RefundStatus + + method: RefundMethod + + content: str + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + refunded_at: Optional[datetime] + + +@dataclass +class Rescue: + os_id: int + """ + OS ID of the rescue. + """ + + login: str + """ + Login of the rescue. + """ + + password: str + """ + Password of the rescue. + """ + + protocol: RescueProtocol + """ + Protocol of the resuce. + """ + + +@dataclass +class RpnApiGetRpnStatusRequest: + project_id: Optional[str] + """ + A project ID. + """ + + rpnv1_group_id: Optional[int] + """ + An RPN v1 group ID. + """ + + rpnv2_group_id: Optional[int] + """ + An RPN v2 group ID. + """ + + +@dataclass +class RpnApiListRpnServerCapabilitiesRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of servers per page. + """ + + order_by: Optional[ListRpnServerCapabilitiesRequestOrderBy] + """ + Order of the servers. + """ + + project_id: Optional[str] + """ + Filter servers by project ID. + """ + + +@dataclass +class RpnSanApiAddIpRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + ip_ids: List[int] + """ + An array of IP ID. + """ + + +@dataclass +class RpnSanApiCreateRpnSanRequest: + offer_id: int + """ + Offer ID. + """ + + project_id: Optional[str] + """ + Your project ID. + """ + + +@dataclass +class RpnSanApiDeleteRpnSanRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + +@dataclass +class RpnSanApiGetRpnSanRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + +@dataclass +class RpnSanApiListAvailableIpsRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + type_: Optional[RpnSanIpType] + """ + Filter by IP type (server | rpnv2_subnet). + """ + + +@dataclass +class RpnSanApiListIpsRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + type_: Optional[RpnSanIpType] + """ + Filter by IP type (server | rpnv2_subnet). + """ + + +@dataclass +class RpnSanApiListRpnSansRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of RPN SANs per page. + """ + + order_by: Optional[ListRpnSansRequestOrderBy] + """ + Order of the RPN SANs. + """ + + project_id: Optional[str] + """ + Filter RPN SANs by project ID. 
+ """ + + +@dataclass +class RpnSanApiRemoveIpRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + ip_ids: List[int] + """ + An array of IP ID. + """ + + +@dataclass +class RpnV1ApiAcceptRpnInviteRequest: + member_id: int + """ + The member ID. + """ + + +@dataclass +class RpnV1ApiAddRpnGroupMembersRequest: + group_id: int + """ + The rpn v1 group ID. + """ + + server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable server IDs. + """ + + san_server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable RPN SAN server IDs. + """ + + +@dataclass +class RpnV1ApiCreateRpnGroupRequest: + name: str + """ + Rpn v1 group name. + """ + + server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable servers. + """ + + san_server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable rpn sans servers. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiDeleteRpnGroupMembersRequest: + group_id: int + """ + The rpn v1 group ID. + """ + + member_ids: List[int] + """ + A collection of rpn v1 group members IDs. + """ + + +@dataclass +class RpnV1ApiDeleteRpnGroupRequest: + group_id: int + """ + Rpn v1 group ID. + """ + + +@dataclass +class RpnV1ApiGetRpnGroupRequest: + group_id: int + """ + Rpn v1 group ID. + """ + + +@dataclass +class RpnV1ApiLeaveRpnGroupRequest: + group_id: int + """ + The RPN V1 group ID. + """ + + member_ids: List[int] + """ + A collection of rpn v1 group members IDs. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiListRpnCapableSanServersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn capable resources per page. + """ + + order_by: Optional[ListRpnCapableSanServersRequestOrderBy] + """ + Order of the rpn capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn capable resources by project ID. + """ + + +@dataclass +class RpnV1ApiListRpnCapableServersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn capable resources per page. + """ + + order_by: Optional[ListRpnCapableServersRequestOrderBy] + """ + Order of the rpn capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn capable resources by project ID. + """ + + +@dataclass +class RpnV1ApiListRpnGroupMembersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v1 group members per page. + """ + + order_by: Optional[ListRpnGroupMembersRequestOrderBy] + """ + Order of the rpn v1 group members. + """ + + group_id: int + """ + Filter rpn v1 group members by group ID. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiListRpnGroupsRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v1 groups per page. + """ + + order_by: Optional[ListRpnGroupsRequestOrderBy] + """ + Order of the rpn v1 groups. + """ + + project_id: Optional[str] + """ + Filter rpn v1 groups by project ID. + """ + + +@dataclass +class RpnV1ApiListRpnInvitesRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn capable resources per page. + """ + + order_by: Optional[ListRpnInvitesRequestOrderBy] + """ + Order of the rpn capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn capable resources by project ID. 
+ """ + + +@dataclass +class RpnV1ApiRefuseRpnInviteRequest: + member_id: int + """ + The member ID. + """ + + +@dataclass +class RpnV1ApiRpnGroupInviteRequest: + group_id: int + """ + The RPN V1 group ID. + """ + + server_ids: List[int] + """ + A collection of external server IDs. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiUpdateRpnGroupNameRequest: + group_id: int + """ + Rpn v1 group ID. + """ + + name: Optional[str] + """ + New rpn v1 group name. + """ + + +@dataclass +class RpnV2ApiAddRpnV2MembersRequest: + group_id: int + """ + RPN V2 group ID. + """ + + servers: List[int] + """ + A collection of server IDs. + """ + + +@dataclass +class RpnV2ApiCreateRpnV2GroupRequest: + name: str + """ + RPN V2 group name. + """ + + servers: List[int] + """ + A collection of server IDs. + """ + + project_id: Optional[str] + """ + Project ID of the RPN V2 group. + """ + + type_: Optional[RpnV2GroupType] + """ + RPN V2 group type (qing / standard). + """ + + +@dataclass +class RpnV2ApiDeleteRpnV2GroupRequest: + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiDeleteRpnV2MembersRequest: + group_id: int + """ + RPN V2 group ID. + """ + + member_ids: List[int] + """ + A collection of member IDs. + """ + + +@dataclass +class RpnV2ApiDisableRpnV2GroupCompatibilityRequest: + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiEnableRpnV2GroupCompatibilityRequest: + group_id: int + """ + RPN V2 group ID. + """ + + rpnv1_group_id: int + """ + RPN V1 group ID. + """ + + +@dataclass +class RpnV2ApiGetRpnV2GroupRequest: + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2CapableResourcesRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 capable resources per page. + """ + + order_by: Optional[ListRpnV2CapableResourcesRequestOrderBy] + """ + Order of the rpn v2 capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn v2 capable resources by project ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2GroupLogsRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 group logs per page. + """ + + order_by: Optional[ListRpnV2GroupLogsRequestOrderBy] + """ + Order of the rpn v2 group logs. + """ + + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2GroupsRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 groups per page. + """ + + order_by: Optional[ListRpnV2GroupsRequestOrderBy] + """ + Order of the rpn v2 groups. + """ + + project_id: Optional[str] + """ + Filter rpn v2 groups by project ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2MembersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 group members per page. + """ + + order_by: Optional[ListRpnV2MembersRequestOrderBy] + """ + Order of the rpn v2 group members. + """ + + group_id: int + """ + RPN V2 group ID. + """ + + type_: Optional[ListRpnV2MembersRequestType] + """ + Filter members by type. + """ + + +@dataclass +class RpnV2ApiUpdateRpnV2GroupNameRequest: + group_id: int + """ + RPN V2 group ID. + """ + + name: Optional[str] + """ + RPN V2 group name. + """ + + +@dataclass +class RpnV2ApiUpdateRpnV2VlanForMembersRequest: + group_id: int + """ + RPN V2 group ID. + """ + + member_ids: List[int] + """ + RPN V2 member IDs. 
+ """ + + vlan: Optional[int] + """ + Min: 0. +Max: 3967. + """ + + +@dataclass +class ServerDefaultPartitioning: + partitions: List[Partition] + """ + Default partitions. + """ + + +@dataclass +class ServerInstall: + os_id: int + + hostname: str + + partitions: List[Partition] + + ssh_key_ids: List[str] + + status: ServerInstallStatus + + user_login: Optional[str] + + panel_url: Optional[str] + + +@dataclass +class StartBMCAccessRequest: + server_id: int + """ + ID of the server to start the BMC access. + """ + + ip: str + """ + The IP authorized to connect to the given server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StartRescueRequest: + server_id: int + """ + ID of the server to start rescue. + """ + + os_id: int + """ + OS ID to use to start rescue. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StartServerRequest: + server_id: int + """ + Server ID to start. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StopBMCAccessRequest: + server_id: int + """ + ID of the server to stop BMC access. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StopRescueRequest: + server_id: int + """ + ID of the server to stop rescue. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StopServerRequest: + server_id: int + """ + Server ID to stop. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class SubscribeServerOptionRequest: + server_id: int + """ + Server ID to subscribe server option. + """ + + option_id: int + """ + Option ID to subscribe. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class SubscribeStorageOptionsRequest: + server_id: int + """ + Server ID of the storage options to subscribe. + """ + + options_ids: List[int] + """ + Option IDs of the storage options to subscribe. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class SubscribeStorageOptionsResponse: + services: List[Service] + """ + Services subscribe storage options. + """ + + +@dataclass +class UpdateRaidRequest: + server_id: int + """ + ID of the server. + """ + + raid_arrays: List[UpdatableRaidArray] + """ + RAIDs to update. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class UpdateReverseRequest: + ip_id: int + """ + ID of the IP. + """ + + reverse: str + """ + Reverse to apply on the IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class UpdateServerBackupRequest: + server_id: int + """ + Server ID to update backup. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + password: Optional[str] + """ + Password of the server backup. + """ + + autologin: Optional[bool] + """ + Autologin of the server backup. 
+ """ + + acl_enabled: Optional[bool] + """ + Boolean to enable or disable ACL. + """ + + +@dataclass +class UpdateServerRequest: + server_id: int + """ + Server ID to update. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + hostname: Optional[str] + """ + Hostname of the server to update. + """ + + enable_ipv6: Optional[bool] + """ + Flag to enable or not the IPv6 of server. + """ + + +@dataclass +class UpdateServerTagsRequest: + server_id: int + """ + Server ID to update the tags. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + tags: Optional[List[str]] + """ + Tags of server to update. + """ diff --git a/scaleway/scaleway/dedibox/__init__.py b/scaleway/scaleway/dedibox/__init__.py new file mode 100644 index 000000000..8b74a5ed7 --- /dev/null +++ b/scaleway/scaleway/dedibox/__init__.py @@ -0,0 +1,2 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. diff --git a/scaleway/scaleway/dedibox/v1/__init__.py b/scaleway/scaleway/dedibox/v1/__init__.py new file mode 100644 index 000000000..e6b1144a7 --- /dev/null +++ b/scaleway/scaleway/dedibox/v1/__init__.py @@ -0,0 +1,541 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. +from .types import AttachFailoverIPToMacAddressRequestMacType +from .types import BMCAccessStatus +from .content import BMC_ACCESS_TRANSIENT_STATUSES +from .types import BackupStatus +from .types import FailoverBlockVersion +from .types import FailoverIPInterfaceType +from .types import FailoverIPStatus +from .types import FailoverIPVersion +from .types import GetRpnStatusResponseStatus +from .types import IPSemantic +from .types import IPStatus +from .types import IPVersion +from .types import IPv6BlockDelegationStatus +from .content import I_PV6_BLOCK_DELEGATION_TRANSIENT_STATUSES +from .types import InvoicePaymentMethod +from .types import InvoiceStatus +from .types import ListFailoverIPsRequestOrderBy +from .types import ListInvoicesRequestOrderBy +from .types import ListOSRequestOrderBy +from .types import ListOffersRequestOrderBy +from .types import ListRefundsRequestOrderBy +from .types import ListRpnCapableSanServersRequestOrderBy +from .types import ListRpnCapableServersRequestOrderBy +from .types import ListRpnGroupMembersRequestOrderBy +from .types import ListRpnGroupsRequestOrderBy +from .types import ListRpnInvitesRequestOrderBy +from .types import ListRpnSansRequestOrderBy +from .types import ListRpnServerCapabilitiesRequestOrderBy +from .types import ListRpnV2CapableResourcesRequestOrderBy +from .types import ListRpnV2GroupLogsRequestOrderBy +from .types import ListRpnV2GroupsRequestOrderBy +from .types import ListRpnV2MembersRequestOrderBy +from .types import ListRpnV2MembersRequestType +from .types import ListServerDisksRequestOrderBy +from .types import ListServerEventsRequestOrderBy +from .types import ListServersRequestOrderBy +from .types import ListServicesRequestOrderBy +from .types import LogAction +from .types import LogStatus +from .types import MemoryType +from .types import NetworkInterfaceInterfaceType +from .types import OSArch +from .types import OSType +from .types import OfferAntiDosInfoType +from .types import OfferCatalog +from .types import OfferPaymentFrequency +from .types import OfferSANInfoType +from .types import 
OfferServerInfoStock +from .types import PartitionFileSystem +from .types import PartitionType +from .types import RaidArrayRaidLevel +from .types import RefundMethod +from .types import RefundStatus +from .types import RescueProtocol +from .types import RpnGroupMemberStatus +from .content import RPN_GROUP_MEMBER_TRANSIENT_STATUSES +from .types import RpnGroupType +from .types import RpnSanIpType +from .types import RpnSanStatus +from .content import RPN_SAN_TRANSIENT_STATUSES +from .types import RpnV2GroupStatus +from .content import RPN_V2_GROUP_TRANSIENT_STATUSES +from .types import RpnV2GroupType +from .types import RpnV2MemberStatus +from .content import RPN_V2_MEMBER_TRANSIENT_STATUSES +from .types import ServerDiskType +from .types import ServerInstallStatus +from .content import SERVER_INSTALL_TRANSIENT_STATUSES +from .types import ServerStatus +from .content import SERVER_TRANSIENT_STATUSES +from .types import ServiceLevelLevel +from .types import ServiceProvisioningStatus +from .content import SERVICE_PROVISIONING_TRANSIENT_STATUSES +from .types import ServiceType +from .types import OfferAntiDosInfo +from .types import OfferBackupInfo +from .types import OfferBandwidthInfo +from .types import OfferLicenseInfo +from .types import OfferRPNInfo +from .types import OfferSANInfo +from .types import OfferStorageInfo +from .types import IP +from .types import Offer +from .types import NetworkInterface +from .types import OS +from .types import ServerLocation +from .types import ServerOption +from .types import ServiceLevel +from .types import RpnSan +from .types import RpnGroup +from .types import RpnV2GroupSubnet +from .types import Server +from .types import FailoverBlock +from .types import RpnSanIpRpnV2Group +from .types import RpnSanIpServer +from .types import RpnSanServer +from .types import RpnV2Group +from .types import RpnV2Member +from .types import ServerDisk +from .types import Service +from .types import GetIPv6BlockQuotasResponseQuota +from .types import InstallPartition +from .types import FailoverIP +from .types import ListIPv6BlockSubnetsAvailableResponseSubnet +from .types import InvoiceSummary +from .types import RpnSanIp +from .types import RefundSummary +from .types import RpnGroupMember +from .types import RpnSanSummary +from .types import RpnServerCapability +from .types import Log +from .types import ServerEvent +from .types import ServerSummary +from .types import CPU +from .types import Disk +from .types import Memory +from .types import PersistentMemory +from .types import RaidController +from .types import RaidArray +from .types import Partition +from .types import UpdatableRaidArray +from .types import AttachFailoverIPToMacAddressRequest +from .types import AttachFailoverIPsRequest +from .types import BMCAccess +from .types import Backup +from .types import BillingApiCanOrderRequest +from .types import BillingApiDownloadInvoiceRequest +from .types import BillingApiDownloadRefundRequest +from .types import BillingApiGetInvoiceRequest +from .types import BillingApiGetRefundRequest +from .types import BillingApiListInvoicesRequest +from .types import BillingApiListRefundsRequest +from .types import CanOrderResponse +from .types import CancelServerInstallRequest +from .types import CreateFailoverIPsRequest +from .types import CreateFailoverIPsResponse +from .types import CreateServerRequest +from .types import DeleteFailoverIPRequest +from .types import DeleteServerRequest +from .types import DeleteServiceRequest +from .types import 
DetachFailoverIPFromMacAddressRequest +from .types import DetachFailoverIPsRequest +from .types import GetBMCAccessRequest +from .types import GetFailoverIPRequest +from .types import GetIPv6BlockQuotasResponse +from .types import GetOSRequest +from .types import GetOfferRequest +from .types import GetOrderedServiceRequest +from .types import GetRaidRequest +from .types import GetRemainingQuotaRequest +from .types import GetRemainingQuotaResponse +from .types import GetRescueRequest +from .types import GetRpnStatusResponse +from .types import GetServerBackupRequest +from .types import GetServerDefaultPartitioningRequest +from .types import GetServerInstallRequest +from .types import GetServerRequest +from .types import GetServiceRequest +from .types import IPv6Block +from .types import IPv6BlockApiCreateIPv6BlockRequest +from .types import IPv6BlockApiCreateIPv6BlockSubnetRequest +from .types import IPv6BlockApiDeleteIPv6BlockRequest +from .types import IPv6BlockApiGetIPv6BlockQuotasRequest +from .types import IPv6BlockApiGetIPv6BlockRequest +from .types import IPv6BlockApiListIPv6BlockSubnetsAvailableRequest +from .types import IPv6BlockApiUpdateIPv6BlockRequest +from .types import InstallServerRequest +from .types import Invoice +from .types import ListFailoverIPsRequest +from .types import ListFailoverIPsResponse +from .types import ListIPv6BlockSubnetsAvailableResponse +from .types import ListInvoicesResponse +from .types import ListIpsResponse +from .types import ListOSRequest +from .types import ListOSResponse +from .types import ListOffersRequest +from .types import ListOffersResponse +from .types import ListRefundsResponse +from .types import ListRpnCapableSanServersResponse +from .types import ListRpnCapableServersResponse +from .types import ListRpnGroupMembersResponse +from .types import ListRpnGroupsResponse +from .types import ListRpnInvitesResponse +from .types import ListRpnSansResponse +from .types import ListRpnServerCapabilitiesResponse +from .types import ListRpnV2CapableResourcesResponse +from .types import ListRpnV2GroupLogsResponse +from .types import ListRpnV2GroupsResponse +from .types import ListRpnV2MembersResponse +from .types import ListServerDisksRequest +from .types import ListServerDisksResponse +from .types import ListServerEventsRequest +from .types import ListServerEventsResponse +from .types import ListServersRequest +from .types import ListServersResponse +from .types import ListServicesRequest +from .types import ListServicesResponse +from .types import ListSubscribableServerOptionsRequest +from .types import ListSubscribableServerOptionsResponse +from .types import OfferFailoverBlockInfo +from .types import OfferFailoverIpInfo +from .types import OfferServerInfo +from .types import OfferServiceLevelInfo +from .types import Raid +from .types import RebootServerRequest +from .types import Refund +from .types import Rescue +from .types import RpnApiGetRpnStatusRequest +from .types import RpnApiListRpnServerCapabilitiesRequest +from .types import RpnSanApiAddIpRequest +from .types import RpnSanApiCreateRpnSanRequest +from .types import RpnSanApiDeleteRpnSanRequest +from .types import RpnSanApiGetRpnSanRequest +from .types import RpnSanApiListAvailableIpsRequest +from .types import RpnSanApiListIpsRequest +from .types import RpnSanApiListRpnSansRequest +from .types import RpnSanApiRemoveIpRequest +from .types import RpnV1ApiAcceptRpnInviteRequest +from .types import RpnV1ApiAddRpnGroupMembersRequest +from .types import RpnV1ApiCreateRpnGroupRequest +from 
.types import RpnV1ApiDeleteRpnGroupMembersRequest +from .types import RpnV1ApiDeleteRpnGroupRequest +from .types import RpnV1ApiGetRpnGroupRequest +from .types import RpnV1ApiLeaveRpnGroupRequest +from .types import RpnV1ApiListRpnCapableSanServersRequest +from .types import RpnV1ApiListRpnCapableServersRequest +from .types import RpnV1ApiListRpnGroupMembersRequest +from .types import RpnV1ApiListRpnGroupsRequest +from .types import RpnV1ApiListRpnInvitesRequest +from .types import RpnV1ApiRefuseRpnInviteRequest +from .types import RpnV1ApiRpnGroupInviteRequest +from .types import RpnV1ApiUpdateRpnGroupNameRequest +from .types import RpnV2ApiAddRpnV2MembersRequest +from .types import RpnV2ApiCreateRpnV2GroupRequest +from .types import RpnV2ApiDeleteRpnV2GroupRequest +from .types import RpnV2ApiDeleteRpnV2MembersRequest +from .types import RpnV2ApiDisableRpnV2GroupCompatibilityRequest +from .types import RpnV2ApiEnableRpnV2GroupCompatibilityRequest +from .types import RpnV2ApiGetRpnV2GroupRequest +from .types import RpnV2ApiListRpnV2CapableResourcesRequest +from .types import RpnV2ApiListRpnV2GroupLogsRequest +from .types import RpnV2ApiListRpnV2GroupsRequest +from .types import RpnV2ApiListRpnV2MembersRequest +from .types import RpnV2ApiUpdateRpnV2GroupNameRequest +from .types import RpnV2ApiUpdateRpnV2VlanForMembersRequest +from .types import ServerDefaultPartitioning +from .types import ServerInstall +from .types import StartBMCAccessRequest +from .types import StartRescueRequest +from .types import StartServerRequest +from .types import StopBMCAccessRequest +from .types import StopRescueRequest +from .types import StopServerRequest +from .types import SubscribeServerOptionRequest +from .types import SubscribeStorageOptionsRequest +from .types import SubscribeStorageOptionsResponse +from .types import UpdateRaidRequest +from .types import UpdateReverseRequest +from .types import UpdateServerBackupRequest +from .types import UpdateServerRequest +from .types import UpdateServerTagsRequest +from .api import DediboxV1API +from .api import DediboxV1BillingAPI +from .api import DediboxV1IPv6BlockAPI +from .api import DediboxV1RpnAPI +from .api import DediboxV1RpnSanAPI +from .api import DediboxV1RpnV1API +from .api import DediboxV1RpnV2API + +__all__ = [ + "AttachFailoverIPToMacAddressRequestMacType", + "BMCAccessStatus", + "BMC_ACCESS_TRANSIENT_STATUSES", + "BackupStatus", + "FailoverBlockVersion", + "FailoverIPInterfaceType", + "FailoverIPStatus", + "FailoverIPVersion", + "GetRpnStatusResponseStatus", + "IPSemantic", + "IPStatus", + "IPVersion", + "IPv6BlockDelegationStatus", + "I_PV6_BLOCK_DELEGATION_TRANSIENT_STATUSES", + "InvoicePaymentMethod", + "InvoiceStatus", + "ListFailoverIPsRequestOrderBy", + "ListInvoicesRequestOrderBy", + "ListOSRequestOrderBy", + "ListOffersRequestOrderBy", + "ListRefundsRequestOrderBy", + "ListRpnCapableSanServersRequestOrderBy", + "ListRpnCapableServersRequestOrderBy", + "ListRpnGroupMembersRequestOrderBy", + "ListRpnGroupsRequestOrderBy", + "ListRpnInvitesRequestOrderBy", + "ListRpnSansRequestOrderBy", + "ListRpnServerCapabilitiesRequestOrderBy", + "ListRpnV2CapableResourcesRequestOrderBy", + "ListRpnV2GroupLogsRequestOrderBy", + "ListRpnV2GroupsRequestOrderBy", + "ListRpnV2MembersRequestOrderBy", + "ListRpnV2MembersRequestType", + "ListServerDisksRequestOrderBy", + "ListServerEventsRequestOrderBy", + "ListServersRequestOrderBy", + "ListServicesRequestOrderBy", + "LogAction", + "LogStatus", + "MemoryType", + "NetworkInterfaceInterfaceType", + "OSArch", + 
"OSType", + "OfferAntiDosInfoType", + "OfferCatalog", + "OfferPaymentFrequency", + "OfferSANInfoType", + "OfferServerInfoStock", + "PartitionFileSystem", + "PartitionType", + "RaidArrayRaidLevel", + "RefundMethod", + "RefundStatus", + "RescueProtocol", + "RpnGroupMemberStatus", + "RPN_GROUP_MEMBER_TRANSIENT_STATUSES", + "RpnGroupType", + "RpnSanIpType", + "RpnSanStatus", + "RPN_SAN_TRANSIENT_STATUSES", + "RpnV2GroupStatus", + "RPN_V2_GROUP_TRANSIENT_STATUSES", + "RpnV2GroupType", + "RpnV2MemberStatus", + "RPN_V2_MEMBER_TRANSIENT_STATUSES", + "ServerDiskType", + "ServerInstallStatus", + "SERVER_INSTALL_TRANSIENT_STATUSES", + "ServerStatus", + "SERVER_TRANSIENT_STATUSES", + "ServiceLevelLevel", + "ServiceProvisioningStatus", + "SERVICE_PROVISIONING_TRANSIENT_STATUSES", + "ServiceType", + "OfferAntiDosInfo", + "OfferBackupInfo", + "OfferBandwidthInfo", + "OfferLicenseInfo", + "OfferRPNInfo", + "OfferSANInfo", + "OfferStorageInfo", + "IP", + "Offer", + "NetworkInterface", + "OS", + "ServerLocation", + "ServerOption", + "ServiceLevel", + "RpnSan", + "RpnGroup", + "RpnV2GroupSubnet", + "Server", + "FailoverBlock", + "RpnSanIpRpnV2Group", + "RpnSanIpServer", + "RpnSanServer", + "RpnV2Group", + "RpnV2Member", + "ServerDisk", + "Service", + "GetIPv6BlockQuotasResponseQuota", + "InstallPartition", + "FailoverIP", + "ListIPv6BlockSubnetsAvailableResponseSubnet", + "InvoiceSummary", + "RpnSanIp", + "RefundSummary", + "RpnGroupMember", + "RpnSanSummary", + "RpnServerCapability", + "Log", + "ServerEvent", + "ServerSummary", + "CPU", + "Disk", + "Memory", + "PersistentMemory", + "RaidController", + "RaidArray", + "Partition", + "UpdatableRaidArray", + "AttachFailoverIPToMacAddressRequest", + "AttachFailoverIPsRequest", + "BMCAccess", + "Backup", + "BillingApiCanOrderRequest", + "BillingApiDownloadInvoiceRequest", + "BillingApiDownloadRefundRequest", + "BillingApiGetInvoiceRequest", + "BillingApiGetRefundRequest", + "BillingApiListInvoicesRequest", + "BillingApiListRefundsRequest", + "CanOrderResponse", + "CancelServerInstallRequest", + "CreateFailoverIPsRequest", + "CreateFailoverIPsResponse", + "CreateServerRequest", + "DeleteFailoverIPRequest", + "DeleteServerRequest", + "DeleteServiceRequest", + "DetachFailoverIPFromMacAddressRequest", + "DetachFailoverIPsRequest", + "GetBMCAccessRequest", + "GetFailoverIPRequest", + "GetIPv6BlockQuotasResponse", + "GetOSRequest", + "GetOfferRequest", + "GetOrderedServiceRequest", + "GetRaidRequest", + "GetRemainingQuotaRequest", + "GetRemainingQuotaResponse", + "GetRescueRequest", + "GetRpnStatusResponse", + "GetServerBackupRequest", + "GetServerDefaultPartitioningRequest", + "GetServerInstallRequest", + "GetServerRequest", + "GetServiceRequest", + "IPv6Block", + "IPv6BlockApiCreateIPv6BlockRequest", + "IPv6BlockApiCreateIPv6BlockSubnetRequest", + "IPv6BlockApiDeleteIPv6BlockRequest", + "IPv6BlockApiGetIPv6BlockQuotasRequest", + "IPv6BlockApiGetIPv6BlockRequest", + "IPv6BlockApiListIPv6BlockSubnetsAvailableRequest", + "IPv6BlockApiUpdateIPv6BlockRequest", + "InstallServerRequest", + "Invoice", + "ListFailoverIPsRequest", + "ListFailoverIPsResponse", + "ListIPv6BlockSubnetsAvailableResponse", + "ListInvoicesResponse", + "ListIpsResponse", + "ListOSRequest", + "ListOSResponse", + "ListOffersRequest", + "ListOffersResponse", + "ListRefundsResponse", + "ListRpnCapableSanServersResponse", + "ListRpnCapableServersResponse", + "ListRpnGroupMembersResponse", + "ListRpnGroupsResponse", + "ListRpnInvitesResponse", + "ListRpnSansResponse", + "ListRpnServerCapabilitiesResponse", 
+ "ListRpnV2CapableResourcesResponse", + "ListRpnV2GroupLogsResponse", + "ListRpnV2GroupsResponse", + "ListRpnV2MembersResponse", + "ListServerDisksRequest", + "ListServerDisksResponse", + "ListServerEventsRequest", + "ListServerEventsResponse", + "ListServersRequest", + "ListServersResponse", + "ListServicesRequest", + "ListServicesResponse", + "ListSubscribableServerOptionsRequest", + "ListSubscribableServerOptionsResponse", + "OfferFailoverBlockInfo", + "OfferFailoverIpInfo", + "OfferServerInfo", + "OfferServiceLevelInfo", + "Raid", + "RebootServerRequest", + "Refund", + "Rescue", + "RpnApiGetRpnStatusRequest", + "RpnApiListRpnServerCapabilitiesRequest", + "RpnSanApiAddIpRequest", + "RpnSanApiCreateRpnSanRequest", + "RpnSanApiDeleteRpnSanRequest", + "RpnSanApiGetRpnSanRequest", + "RpnSanApiListAvailableIpsRequest", + "RpnSanApiListIpsRequest", + "RpnSanApiListRpnSansRequest", + "RpnSanApiRemoveIpRequest", + "RpnV1ApiAcceptRpnInviteRequest", + "RpnV1ApiAddRpnGroupMembersRequest", + "RpnV1ApiCreateRpnGroupRequest", + "RpnV1ApiDeleteRpnGroupMembersRequest", + "RpnV1ApiDeleteRpnGroupRequest", + "RpnV1ApiGetRpnGroupRequest", + "RpnV1ApiLeaveRpnGroupRequest", + "RpnV1ApiListRpnCapableSanServersRequest", + "RpnV1ApiListRpnCapableServersRequest", + "RpnV1ApiListRpnGroupMembersRequest", + "RpnV1ApiListRpnGroupsRequest", + "RpnV1ApiListRpnInvitesRequest", + "RpnV1ApiRefuseRpnInviteRequest", + "RpnV1ApiRpnGroupInviteRequest", + "RpnV1ApiUpdateRpnGroupNameRequest", + "RpnV2ApiAddRpnV2MembersRequest", + "RpnV2ApiCreateRpnV2GroupRequest", + "RpnV2ApiDeleteRpnV2GroupRequest", + "RpnV2ApiDeleteRpnV2MembersRequest", + "RpnV2ApiDisableRpnV2GroupCompatibilityRequest", + "RpnV2ApiEnableRpnV2GroupCompatibilityRequest", + "RpnV2ApiGetRpnV2GroupRequest", + "RpnV2ApiListRpnV2CapableResourcesRequest", + "RpnV2ApiListRpnV2GroupLogsRequest", + "RpnV2ApiListRpnV2GroupsRequest", + "RpnV2ApiListRpnV2MembersRequest", + "RpnV2ApiUpdateRpnV2GroupNameRequest", + "RpnV2ApiUpdateRpnV2VlanForMembersRequest", + "ServerDefaultPartitioning", + "ServerInstall", + "StartBMCAccessRequest", + "StartRescueRequest", + "StartServerRequest", + "StopBMCAccessRequest", + "StopRescueRequest", + "StopServerRequest", + "SubscribeServerOptionRequest", + "SubscribeStorageOptionsRequest", + "SubscribeStorageOptionsResponse", + "UpdateRaidRequest", + "UpdateReverseRequest", + "UpdateServerBackupRequest", + "UpdateServerRequest", + "UpdateServerTagsRequest", + "DediboxV1API", + "DediboxV1BillingAPI", + "DediboxV1IPv6BlockAPI", + "DediboxV1RpnAPI", + "DediboxV1RpnSanAPI", + "DediboxV1RpnV1API", + "DediboxV1RpnV2API", +] diff --git a/scaleway/scaleway/dedibox/v1/api.py b/scaleway/scaleway/dedibox/v1/api.py new file mode 100644 index 000000000..9f4c3dcf0 --- /dev/null +++ b/scaleway/scaleway/dedibox/v1/api.py @@ -0,0 +1,4721 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+ +from typing import List, Optional + +from scaleway_core.api import API +from scaleway_core.bridge import ( + ScwFile, + Zone, + unmarshal_ScwFile, +) +from scaleway_core.utils import ( + WaitForOptions, + validate_path_param, + fetch_all_pages, + wait_for_resource, +) +from .types import ( + AttachFailoverIPToMacAddressRequestMacType, + ListFailoverIPsRequestOrderBy, + ListInvoicesRequestOrderBy, + ListOSRequestOrderBy, + ListOffersRequestOrderBy, + ListRefundsRequestOrderBy, + ListRpnCapableSanServersRequestOrderBy, + ListRpnCapableServersRequestOrderBy, + ListRpnGroupMembersRequestOrderBy, + ListRpnGroupsRequestOrderBy, + ListRpnInvitesRequestOrderBy, + ListRpnSansRequestOrderBy, + ListRpnServerCapabilitiesRequestOrderBy, + ListRpnV2CapableResourcesRequestOrderBy, + ListRpnV2GroupLogsRequestOrderBy, + ListRpnV2GroupsRequestOrderBy, + ListRpnV2MembersRequestOrderBy, + ListRpnV2MembersRequestType, + ListServerDisksRequestOrderBy, + ListServerEventsRequestOrderBy, + ListServersRequestOrderBy, + ListServicesRequestOrderBy, + OSType, + OfferCatalog, + RpnSanIpType, + RpnV2GroupType, + AttachFailoverIPToMacAddressRequest, + AttachFailoverIPsRequest, + BMCAccess, + Backup, + CanOrderResponse, + CreateFailoverIPsRequest, + CreateFailoverIPsResponse, + CreateServerRequest, + DetachFailoverIPsRequest, + FailoverIP, + GetIPv6BlockQuotasResponse, + GetRemainingQuotaResponse, + GetRpnStatusResponse, + IP, + IPv6Block, + IPv6BlockApiCreateIPv6BlockRequest, + IPv6BlockApiCreateIPv6BlockSubnetRequest, + IPv6BlockApiUpdateIPv6BlockRequest, + InstallPartition, + InstallServerRequest, + Invoice, + InvoiceSummary, + ListFailoverIPsResponse, + ListIPv6BlockSubnetsAvailableResponse, + ListInvoicesResponse, + ListIpsResponse, + ListOSResponse, + ListOffersResponse, + ListRefundsResponse, + ListRpnCapableSanServersResponse, + ListRpnCapableServersResponse, + ListRpnGroupMembersResponse, + ListRpnGroupsResponse, + ListRpnInvitesResponse, + ListRpnSansResponse, + ListRpnServerCapabilitiesResponse, + ListRpnV2CapableResourcesResponse, + ListRpnV2GroupLogsResponse, + ListRpnV2GroupsResponse, + ListRpnV2MembersResponse, + ListServerDisksResponse, + ListServerEventsResponse, + ListServersResponse, + ListServicesResponse, + ListSubscribableServerOptionsResponse, + Log, + OS, + Offer, + Raid, + Refund, + RefundSummary, + Rescue, + RpnGroup, + RpnGroupMember, + RpnSan, + RpnSanApiAddIpRequest, + RpnSanApiCreateRpnSanRequest, + RpnSanApiRemoveIpRequest, + RpnSanServer, + RpnSanSummary, + RpnServerCapability, + RpnV1ApiAddRpnGroupMembersRequest, + RpnV1ApiCreateRpnGroupRequest, + RpnV1ApiDeleteRpnGroupMembersRequest, + RpnV1ApiLeaveRpnGroupRequest, + RpnV1ApiRpnGroupInviteRequest, + RpnV1ApiUpdateRpnGroupNameRequest, + RpnV2ApiAddRpnV2MembersRequest, + RpnV2ApiCreateRpnV2GroupRequest, + RpnV2ApiDeleteRpnV2MembersRequest, + RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + RpnV2ApiUpdateRpnV2GroupNameRequest, + RpnV2ApiUpdateRpnV2VlanForMembersRequest, + RpnV2Group, + RpnV2Member, + Server, + ServerDefaultPartitioning, + ServerDisk, + ServerEvent, + ServerInstall, + ServerSummary, + Service, + StartBMCAccessRequest, + StartRescueRequest, + SubscribeServerOptionRequest, + SubscribeStorageOptionsRequest, + SubscribeStorageOptionsResponse, + UpdatableRaidArray, + UpdateRaidRequest, + UpdateReverseRequest, + UpdateServerBackupRequest, + UpdateServerRequest, + UpdateServerTagsRequest, +) +from .content import ( + BMC_ACCESS_TRANSIENT_STATUSES, + RPN_SAN_TRANSIENT_STATUSES, + RPN_V2_GROUP_TRANSIENT_STATUSES, + 
SERVER_INSTALL_TRANSIENT_STATUSES, + SERVER_TRANSIENT_STATUSES, +) +from .marshalling import ( + unmarshal_IP, + unmarshal_Offer, + unmarshal_OS, + unmarshal_RpnSan, + unmarshal_RpnGroup, + unmarshal_Server, + unmarshal_RpnV2Group, + unmarshal_Service, + unmarshal_FailoverIP, + unmarshal_BMCAccess, + unmarshal_Backup, + unmarshal_CanOrderResponse, + unmarshal_CreateFailoverIPsResponse, + unmarshal_GetIPv6BlockQuotasResponse, + unmarshal_GetRemainingQuotaResponse, + unmarshal_GetRpnStatusResponse, + unmarshal_IPv6Block, + unmarshal_Invoice, + unmarshal_ListFailoverIPsResponse, + unmarshal_ListIPv6BlockSubnetsAvailableResponse, + unmarshal_ListInvoicesResponse, + unmarshal_ListIpsResponse, + unmarshal_ListOSResponse, + unmarshal_ListOffersResponse, + unmarshal_ListRefundsResponse, + unmarshal_ListRpnCapableSanServersResponse, + unmarshal_ListRpnCapableServersResponse, + unmarshal_ListRpnGroupMembersResponse, + unmarshal_ListRpnGroupsResponse, + unmarshal_ListRpnInvitesResponse, + unmarshal_ListRpnSansResponse, + unmarshal_ListRpnServerCapabilitiesResponse, + unmarshal_ListRpnV2CapableResourcesResponse, + unmarshal_ListRpnV2GroupLogsResponse, + unmarshal_ListRpnV2GroupsResponse, + unmarshal_ListRpnV2MembersResponse, + unmarshal_ListServerDisksResponse, + unmarshal_ListServerEventsResponse, + unmarshal_ListServersResponse, + unmarshal_ListServicesResponse, + unmarshal_ListSubscribableServerOptionsResponse, + unmarshal_Raid, + unmarshal_Refund, + unmarshal_Rescue, + unmarshal_ServerDefaultPartitioning, + unmarshal_ServerInstall, + unmarshal_SubscribeStorageOptionsResponse, + marshal_AttachFailoverIPToMacAddressRequest, + marshal_AttachFailoverIPsRequest, + marshal_CreateFailoverIPsRequest, + marshal_CreateServerRequest, + marshal_DetachFailoverIPsRequest, + marshal_IPv6BlockApiCreateIPv6BlockRequest, + marshal_IPv6BlockApiCreateIPv6BlockSubnetRequest, + marshal_IPv6BlockApiUpdateIPv6BlockRequest, + marshal_InstallServerRequest, + marshal_RpnSanApiAddIpRequest, + marshal_RpnSanApiCreateRpnSanRequest, + marshal_RpnSanApiRemoveIpRequest, + marshal_RpnV1ApiAddRpnGroupMembersRequest, + marshal_RpnV1ApiCreateRpnGroupRequest, + marshal_RpnV1ApiDeleteRpnGroupMembersRequest, + marshal_RpnV1ApiLeaveRpnGroupRequest, + marshal_RpnV1ApiRpnGroupInviteRequest, + marshal_RpnV1ApiUpdateRpnGroupNameRequest, + marshal_RpnV2ApiAddRpnV2MembersRequest, + marshal_RpnV2ApiCreateRpnV2GroupRequest, + marshal_RpnV2ApiDeleteRpnV2MembersRequest, + marshal_RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + marshal_RpnV2ApiUpdateRpnV2GroupNameRequest, + marshal_RpnV2ApiUpdateRpnV2VlanForMembersRequest, + marshal_StartBMCAccessRequest, + marshal_StartRescueRequest, + marshal_SubscribeServerOptionRequest, + marshal_SubscribeStorageOptionsRequest, + marshal_UpdateRaidRequest, + marshal_UpdateReverseRequest, + marshal_UpdateServerBackupRequest, + marshal_UpdateServerRequest, + marshal_UpdateServerTagsRequest, +) + + +class DediboxV1API(API): + """ + Dedibox Phoenix API. + """ + + def list_servers( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServersRequestOrderBy] = None, + project_id: Optional[str] = None, + search: Optional[str] = None, + ) -> ListServersResponse: + """ + List baremetal servers for project. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server per page. + :param order_by: Order of the servers. 
+ :param project_id: Filter servers by project ID. + :param search: Filter servers by hostname. + :return: :class:`ListServersResponse ` + + Usage: + :: + + result = api.list_servers() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "search": search, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServersResponse(res.json()) + + def list_servers_all( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServersRequestOrderBy] = None, + project_id: Optional[str] = None, + search: Optional[str] = None, + ) -> List[ServerSummary]: + """ + List baremetal servers for project. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server per page. + :param order_by: Order of the servers. + :param project_id: Filter servers by project ID. + :param search: Filter servers by hostname. + :return: :class:`List[ServerSummary] ` + + Usage: + :: + + result = api.list_servers_all() + """ + + return fetch_all_pages( + type=ListServersResponse, + key="servers", + fetcher=self.list_servers, + args={ + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + "search": search, + }, + ) + + def get_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Server: + """ + Get a specific baremetal server. + Get the server associated with the given ID. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Server ` + + Usage: + :: + + result = api.get_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}", + ) + + self._throw_on_error(res) + return unmarshal_Server(res.json()) + + def wait_for_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + options: Optional[WaitForOptions[Server, bool]] = None, + ) -> Server: + """ + Get a specific baremetal server. + Get the server associated with the given ID. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Server ` + + Usage: + :: + + result = api.get_server( + server_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in SERVER_TRANSIENT_STATUSES + + return wait_for_resource( + fetcher=self.get_server, + options=options, + args={ + "server_id": server_id, + "zone": zone, + }, + ) + + def get_server_backup( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Backup: + """ + :param server_id: Server ID of the backup. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ :return: :class:`Backup ` + + Usage: + :: + + result = api.get_server_backup( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/backups", + ) + + self._throw_on_error(res) + return unmarshal_Backup(res.json()) + + def update_server_backup( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + password: Optional[str] = None, + autologin: Optional[bool] = None, + acl_enabled: Optional[bool] = None, + ) -> Backup: + """ + :param server_id: Server ID to update backup. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param password: Password of the server backup. + :param autologin: Autologin of the server backup. + :param acl_enabled: Boolean to enable or disable ACL. + :return: :class:`Backup ` + + Usage: + :: + + result = api.update_server_backup( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/backups", + body=marshal_UpdateServerBackupRequest( + UpdateServerBackupRequest( + server_id=server_id, + zone=zone, + password=password, + autologin=autologin, + acl_enabled=acl_enabled, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Backup(res.json()) + + def list_subscribable_server_options( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListSubscribableServerOptionsResponse: + """ + List subscribable server options. + List subscribable options associated to the given server ID. + :param server_id: Server ID of the subscribable server options. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of subscribable server option per page. + :return: :class:`ListSubscribableServerOptionsResponse ` + + Usage: + :: + + result = api.list_subscribable_server_options( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/subscribable-server-options", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListSubscribableServerOptionsResponse(res.json()) + + def list_subscribable_server_options_all( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> List[Offer]: + """ + List subscribable server options. + List subscribable options associated to the given server ID. + :param server_id: Server ID of the subscribable server options. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of subscribable server option per page. 
+ :return: :class:`List[Offer] ` + + Usage: + :: + + result = api.list_subscribable_server_options_all( + server_id=1, + ) + """ + + return fetch_all_pages( + type=ListSubscribableServerOptionsResponse, + key="server_options", + fetcher=self.list_subscribable_server_options, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + }, + ) + + def subscribe_server_option( + self, + *, + server_id: int, + option_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + Subscribe server option. + Subscribe option for the given server ID. + :param server_id: Server ID to subscribe server option. + :param option_id: Option ID to subscribe. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Service ` + + Usage: + :: + + result = api.subscribe_server_option( + server_id=1, + option_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/subscribe-server-option", + body=marshal_SubscribeServerOptionRequest( + SubscribeServerOptionRequest( + server_id=server_id, + option_id=option_id, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + def create_server( + self, + *, + offer_id: int, + server_option_ids: List[int], + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + datacenter_name: Optional[str] = None, + ) -> Service: + """ + Create a baremetal server. + Create a new baremetal server. The order return you a service ID to follow the provisionning status you could call GetService. + :param offer_id: Offer ID of the new server. + :param server_option_ids: Server option IDs of the new server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID of the new server. + :param datacenter_name: Datacenter name of the new server. + :return: :class:`Service ` + + Usage: + :: + + result = api.create_server( + offer_id=1, + server_option_ids=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers", + body=marshal_CreateServerRequest( + CreateServerRequest( + offer_id=offer_id, + server_option_ids=server_option_ids, + zone=zone, + project_id=project_id, + datacenter_name=datacenter_name, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + def subscribe_storage_options( + self, + *, + server_id: int, + options_ids: List[int], + zone: Optional[Zone] = None, + ) -> SubscribeStorageOptionsResponse: + """ + Subscribe storage server option. + Subscribe storage option for the given server ID. + :param server_id: Server ID of the storage options to subscribe. + :param options_ids: Option IDs of the storage options to subscribe. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ :return: :class:`SubscribeStorageOptionsResponse ` + + Usage: + :: + + result = api.subscribe_storage_options( + server_id=1, + options_ids=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/subscribe-storage-options", + body=marshal_SubscribeStorageOptionsRequest( + SubscribeStorageOptionsRequest( + server_id=server_id, + options_ids=options_ids, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_SubscribeStorageOptionsResponse(res.json()) + + def update_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + hostname: Optional[str] = None, + enable_ipv6: Optional[bool] = None, + ) -> Server: + """ + Update a baremetal server. + Update the server associated with the given ID. + :param server_id: Server ID to update. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param hostname: Hostname of the server to update. + :param enable_ipv6: Flag to enable or not the IPv6 of server. + :return: :class:`Server ` + + Usage: + :: + + result = api.update_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}", + body=marshal_UpdateServerRequest( + UpdateServerRequest( + server_id=server_id, + zone=zone, + hostname=hostname, + enable_ipv6=enable_ipv6, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Server(res.json()) + + def update_server_tags( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + tags: Optional[List[str]] = None, + ) -> Server: + """ + :param server_id: Server ID to update the tags. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param tags: Tags of server to update. + :return: :class:`Server ` + + Usage: + :: + + result = api.update_server_tags( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/tags", + body=marshal_UpdateServerTagsRequest( + UpdateServerTagsRequest( + server_id=server_id, + zone=zone, + tags=tags, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Server(res.json()) + + def reboot_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Reboot a baremetal server. + Reboot the server associated with the given ID, use boot param to reboot in rescue. + :param server_id: Server ID to reboot. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = api.reboot_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/reboot", + body={}, + ) + + self._throw_on_error(res) + + def start_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Start a baremetal server. 
+ Start the server associated with the given ID. + :param server_id: Server ID to start. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = api.start_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/start", + body={}, + ) + + self._throw_on_error(res) + + def stop_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Stop a baremetal server. + Stop the server associated with the given ID. + :param server_id: Server ID to stop. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = api.stop_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/stop", + body={}, + ) + + self._throw_on_error(res) + + def delete_server( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Delete a baremetal server. + Delete the server associated with the given ID. + :param server_id: Server ID to delete. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = api.delete_server( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}", + ) + + self._throw_on_error(res) + + def list_server_events( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerEventsRequestOrderBy] = None, + ) -> ListServerEventsResponse: + """ + List server events. + List events associated to the given server ID. + :param server_id: Server ID of the server events. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server event per page. + :param order_by: Order of the server events. + :return: :class:`ListServerEventsResponse ` + + Usage: + :: + + result = api.list_server_events( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/events", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServerEventsResponse(res.json()) + + def list_server_events_all( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerEventsRequestOrderBy] = None, + ) -> List[ServerEvent]: + """ + List server events. + List events associated to the given server ID. + :param server_id: Server ID of the server events. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. 
+ :param page_size: Number of server event per page. + :param order_by: Order of the server events. + :return: :class:`List[ServerEvent] ` + + Usage: + :: + + result = api.list_server_events_all( + server_id=1, + ) + """ + + return fetch_all_pages( + type=ListServerEventsResponse, + key="events", + fetcher=self.list_server_events, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + }, + ) + + def list_server_disks( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerDisksRequestOrderBy] = None, + ) -> ListServerDisksResponse: + """ + List server disks. + List disks associated to the given server ID. + :param server_id: Server ID of the server disks. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server disk per page. + :param order_by: Order of the server disks. + :return: :class:`ListServerDisksResponse ` + + Usage: + :: + + result = api.list_server_disks( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/disks", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServerDisksResponse(res.json()) + + def list_server_disks_all( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServerDisksRequestOrderBy] = None, + ) -> List[ServerDisk]: + """ + List server disks. + List disks associated to the given server ID. + :param server_id: Server ID of the server disks. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of server disk per page. + :param order_by: Order of the server disks. + :return: :class:`List[ServerDisk] ` + + Usage: + :: + + result = api.list_server_disks_all( + server_id=1, + ) + """ + + return fetch_all_pages( + type=ListServerDisksResponse, + key="disks", + fetcher=self.list_server_disks, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + }, + ) + + def get_ordered_service( + self, + *, + ordered_service_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + :param ordered_service_id: + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Service ` + + Usage: + :: + + result = api.get_ordered_service( + ordered_service_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ordered_service_id = validate_path_param( + "ordered_service_id", ordered_service_id + ) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/ordered-services/{param_ordered_service_id}", + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + def get_service( + self, + *, + service_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + Get a specific service. + Get the service associated with the given ID. + :param service_id: ID of the service. + :param zone: Zone to target. 
If none is passed will use default zone from the config. + :return: :class:`Service ` + + Usage: + :: + + result = api.get_service( + service_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_service_id = validate_path_param("service_id", service_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/services/{param_service_id}", + body={}, + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + def delete_service( + self, + *, + service_id: int, + zone: Optional[Zone] = None, + ) -> Service: + """ + Delete a specific service. + Delete the service associated with the given ID. + :param service_id: ID of the service. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Service ` + + Usage: + :: + + result = api.delete_service( + service_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_service_id = validate_path_param("service_id", service_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/services/{param_service_id}", + ) + + self._throw_on_error(res) + return unmarshal_Service(res.json()) + + def list_services( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListServicesResponse: + """ + List services. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of service per page. + :param order_by: Order of the services. + :param project_id: Project ID. + :return: :class:`ListServicesResponse ` + + Usage: + :: + + result = api.list_services() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/services", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListServicesResponse(res.json()) + + def list_services_all( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListServicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[Service]: + """ + List services. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of service per page. + :param order_by: Order of the services. + :param project_id: Project ID. 
+ :return: :class:`List[Service] ` + + Usage: + :: + + result = api.list_services_all() + """ + + return fetch_all_pages( + type=ListServicesResponse, + key="services", + fetcher=self.list_services, + args={ + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def install_server( + self, + *, + server_id: int, + os_id: int, + hostname: str, + zone: Optional[Zone] = None, + user_login: Optional[str] = None, + user_password: Optional[str] = None, + panel_password: Optional[str] = None, + root_password: Optional[str] = None, + partitions: Optional[List[InstallPartition]] = None, + ssh_key_ids: Optional[List[str]] = None, + license_offer_id: Optional[int] = None, + ip_id: Optional[int] = None, + ) -> ServerInstall: + """ + Install a baremetal server. + Install an OS on the server associated with the given ID. + :param server_id: Server ID to install. + :param os_id: OS ID to install on the server. + :param hostname: Hostname of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param user_login: User to install on the server. + :param user_password: User password to install on the server. + :param panel_password: Panel password to install on the server. + :param root_password: Root password to install on the server. + :param partitions: Partitions to install on the server. + :param ssh_key_ids: SSH key IDs authorized on the server. + :param license_offer_id: Offer ID of license to install on server. + :param ip_id: IP to link at the license to install on server. + :return: :class:`ServerInstall ` + + Usage: + :: + + result = api.install_server( + server_id=1, + os_id=1, + hostname="example", + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/install", + body=marshal_InstallServerRequest( + InstallServerRequest( + server_id=server_id, + os_id=os_id, + hostname=hostname, + zone=zone, + user_login=user_login, + user_password=user_password, + panel_password=panel_password, + root_password=root_password, + partitions=partitions, + ssh_key_ids=ssh_key_ids, + license_offer_id=license_offer_id, + ip_id=ip_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_ServerInstall(res.json()) + + def get_server_install( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> ServerInstall: + """ + Get a specific server installation status. + Get the server installation status associated with the given server ID. + :param server_id: Server ID of the server to install. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`ServerInstall ` + + Usage: + :: + + result = api.get_server_install( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/install", + ) + + self._throw_on_error(res) + return unmarshal_ServerInstall(res.json()) + + def wait_for_server_install( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + options: Optional[WaitForOptions[ServerInstall, bool]] = None, + ) -> ServerInstall: + """ + Get a specific server installation status. 
+ Get the server installation status associated with the given server ID. + :param server_id: Server ID of the server to install. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`ServerInstall ` + + Usage: + :: + + result = api.get_server_install( + server_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = ( + lambda res: res.status not in SERVER_INSTALL_TRANSIENT_STATUSES + ) + + return wait_for_resource( + fetcher=self.get_server_install, + options=options, + args={ + "server_id": server_id, + "zone": zone, + }, + ) + + def cancel_server_install( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Cancels the current (running) server installation. + Cancels the current server installation associated with the given server ID. + :param server_id: Server ID of the server to cancel install. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = api.cancel_server_install( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/cancel-install", + ) + + self._throw_on_error(res) + + def get_server_default_partitioning( + self, + *, + server_id: int, + os_id: int, + zone: Optional[Zone] = None, + ) -> ServerDefaultPartitioning: + """ + Get server default partitioning. + Get the server default partitioning schema associated with the given server ID and OS ID. + :param server_id: ID of the server. + :param os_id: OS ID of the default partitioning. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`ServerDefaultPartitioning ` + + Usage: + :: + + result = api.get_server_default_partitioning( + server_id=1, + os_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + param_os_id = validate_path_param("os_id", os_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/partitioning/{param_os_id}", + ) + + self._throw_on_error(res) + return unmarshal_ServerDefaultPartitioning(res.json()) + + def start_bmc_access( + self, + *, + server_id: int, + ip: str, + zone: Optional[Zone] = None, + ) -> None: + """ + Start BMC (Baseboard Management Controller) access for a given baremetal server. + Start BMC (Baseboard Management Controller) access associated with the given ID. + The BMC (Baseboard Management Controller) access is available one hour after the installation of the server. + :param server_id: ID of the server to start the BMC access. + :param ip: The IP authorized to connect to the given server. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ + Usage: + :: + + result = api.start_bmc_access( + server_id=1, + ip="example", + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/bmc-access", + body=marshal_StartBMCAccessRequest( + StartBMCAccessRequest( + server_id=server_id, + ip=ip, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def get_bmc_access( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> BMCAccess: + """ + Get BMC (Baseboard Management Controller) access for a given baremetal server. + Get the BMC (Baseboard Management Controller) access associated with the given ID. + :param server_id: ID of the server to get BMC access. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`BMCAccess ` + + Usage: + :: + + result = api.get_bmc_access( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/bmc-access", + ) + + self._throw_on_error(res) + return unmarshal_BMCAccess(res.json()) + + def wait_for_bmc_access( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + options: Optional[WaitForOptions[BMCAccess, bool]] = None, + ) -> BMCAccess: + """ + Get BMC (Baseboard Management Controller) access for a given baremetal server. + Get the BMC (Baseboard Management Controller) access associated with the given ID. + :param server_id: ID of the server to get BMC access. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`BMCAccess ` + + Usage: + :: + + result = api.get_bmc_access( + server_id=1, + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in BMC_ACCESS_TRANSIENT_STATUSES + + return wait_for_resource( + fetcher=self.get_bmc_access, + options=options, + args={ + "server_id": server_id, + "zone": zone, + }, + ) + + def stop_bmc_access( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Stop BMC (Baseboard Management Controller) access for a given baremetal server. + Stop BMC (Baseboard Management Controller) access associated with the given ID. + :param server_id: ID of the server to stop BMC access. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ + Usage: + :: + + result = api.stop_bmc_access( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/bmc-access", + ) + + self._throw_on_error(res) + + def list_offers( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListOffersRequestOrderBy] = None, + commercial_range: Optional[str] = None, + catalog: Optional[OfferCatalog] = None, + project_id: Optional[str] = None, + is_failover_ip: Optional[bool] = None, + is_failover_block: Optional[bool] = None, + sold_in: Optional[List[str]] = None, + available_only: Optional[bool] = None, + is_rpn_san: Optional[bool] = None, + ) -> ListOffersResponse: + """ + List offers. + List all available server offers. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of offer per page. + :param order_by: Order of the offers. + :param commercial_range: Filter on commercial range. + :param catalog: Filter on catalog. + :param project_id: Project ID. + :param is_failover_ip: Get the current failover IP offer. + :param is_failover_block: Get the current failover IP block offer. + :param sold_in: Filter offers depending on their datacenter. + :param available_only: Set this filter to true to only return available offers. + :param is_rpn_san: Get the RPN SAN offers. + :return: :class:`ListOffersResponse ` + + Usage: + :: + + result = api.list_offers() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/offers", + params={ + "available_only": available_only, + "catalog": catalog, + "commercial_range": commercial_range, + "is_failover_block": is_failover_block, + "is_failover_ip": is_failover_ip, + "is_rpn_san": is_rpn_san, + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "sold_in": ",".join(sold_in) if sold_in and len(sold_in) > 0 else None, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListOffersResponse(res.json()) + + def list_offers_all( + self, + *, + zone: Optional[Zone] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListOffersRequestOrderBy] = None, + commercial_range: Optional[str] = None, + catalog: Optional[OfferCatalog] = None, + project_id: Optional[str] = None, + is_failover_ip: Optional[bool] = None, + is_failover_block: Optional[bool] = None, + sold_in: Optional[List[str]] = None, + available_only: Optional[bool] = None, + is_rpn_san: Optional[bool] = None, + ) -> List[Offer]: + """ + List offers. + List all available server offers. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param page: Page number. + :param page_size: Number of offer per page. + :param order_by: Order of the offers. + :param commercial_range: Filter on commercial range. + :param catalog: Filter on catalog. + :param project_id: Project ID. + :param is_failover_ip: Get the current failover IP offer. + :param is_failover_block: Get the current failover IP block offer. + :param sold_in: Filter offers depending on their datacenter. 
+        :param available_only: Set this filter to true to only return available offers.
+        :param is_rpn_san: Get the RPN SAN offers.
+        :return: :class:`List[Offer] `
+
+        Usage:
+        ::
+
+            result = api.list_offers_all()
+        """
+
+        return fetch_all_pages(
+            type=ListOffersResponse,
+            key="offers",
+            fetcher=self.list_offers,
+            args={
+                "zone": zone,
+                "page": page,
+                "page_size": page_size,
+                "order_by": order_by,
+                "commercial_range": commercial_range,
+                "catalog": catalog,
+                "project_id": project_id,
+                "is_failover_ip": is_failover_ip,
+                "is_failover_block": is_failover_block,
+                "sold_in": sold_in,
+                "available_only": available_only,
+                "is_rpn_san": is_rpn_san,
+            },
+        )
+
+    def get_offer(
+        self,
+        *,
+        offer_id: int,
+        zone: Optional[Zone] = None,
+        project_id: Optional[str] = None,
+    ) -> Offer:
+        """
+        Get offer.
+        Return the specific offer for the given ID.
+        :param offer_id: ID of the offer.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+        :param project_id: Project ID.
+        :return: :class:`Offer `
+
+        Usage:
+        ::
+
+            result = api.get_offer(
+                offer_id=1,
+            )
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+        param_offer_id = validate_path_param("offer_id", offer_id)
+
+        res = self._request(
+            "GET",
+            f"/dedibox/v1/zones/{param_zone}/offers/{param_offer_id}",
+            params={
+                "project_id": project_id or self.client.default_project_id,
+            },
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_Offer(res.json())
+
+    def list_os(
+        self,
+        *,
+        server_id: int,
+        zone: Optional[Zone] = None,
+        page: Optional[int] = None,
+        page_size: Optional[int] = None,
+        order_by: Optional[ListOSRequestOrderBy] = None,
+        type_: Optional[OSType] = None,
+        project_id: Optional[str] = None,
+    ) -> ListOSResponse:
+        """
+        List all available OS that can be installed on a baremetal server.
+        :param server_id: Filter OS by compatible server ID.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+        :param page: Page number.
+        :param page_size: Number of OS per page.
+        :param order_by: Order of the OS.
+        :param type_: Type of the OS.
+        :param project_id: Project ID.
+        :return: :class:`ListOSResponse `
+
+        Usage:
+        ::
+
+            result = api.list_os(
+                server_id=1,
+            )
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+
+        res = self._request(
+            "GET",
+            f"/dedibox/v1/zones/{param_zone}/os",
+            params={
+                "order_by": order_by,
+                "page": page,
+                "page_size": page_size or self.client.default_page_size,
+                "project_id": project_id or self.client.default_project_id,
+                "server_id": server_id,
+                "type": type_,
+            },
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_ListOSResponse(res.json())
+
+    def list_os_all(
+        self,
+        *,
+        server_id: int,
+        zone: Optional[Zone] = None,
+        page: Optional[int] = None,
+        page_size: Optional[int] = None,
+        order_by: Optional[ListOSRequestOrderBy] = None,
+        type_: Optional[OSType] = None,
+        project_id: Optional[str] = None,
+    ) -> List[OS]:
+        """
+        List all available OS that can be installed on a baremetal server.
+        :param server_id: Filter OS by compatible server ID.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+        :param page: Page number.
+        :param page_size: Number of OS per page.
+        :param order_by: Order of the OS.
+        :param type_: Type of the OS.
+        :param project_id: Project ID.
+ :return: :class:`List[OS] ` + + Usage: + :: + + result = api.list_os_all( + server_id=1, + ) + """ + + return fetch_all_pages( + type=ListOSResponse, + key="os", + fetcher=self.list_os, + args={ + "server_id": server_id, + "zone": zone, + "page": page, + "page_size": page_size, + "order_by": order_by, + "type_": type_, + "project_id": project_id, + }, + ) + + def get_os( + self, + *, + os_id: int, + server_id: int, + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + ) -> OS: + """ + Get an OS with a given ID. + Return specific OS for the given ID. + :param os_id: ID of the OS. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID. + :return: :class:`OS ` + + Usage: + :: + + result = api.get_os( + os_id=1, + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_os_id = validate_path_param("os_id", os_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/os/{param_os_id}", + params={ + "project_id": project_id or self.client.default_project_id, + "server_id": server_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_OS(res.json()) + + def update_reverse( + self, + *, + ip_id: int, + reverse: str, + zone: Optional[Zone] = None, + ) -> IP: + """ + Update reverse of ip. + Update reverse of ip associated with the given ID. + :param ip_id: ID of the IP. + :param reverse: Reverse to apply on the IP. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`IP ` + + Usage: + :: + + result = api.update_reverse( + ip_id=1, + reverse="example", + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ip_id = validate_path_param("ip_id", ip_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/zones/{param_zone}/reverses/{param_ip_id}", + body=marshal_UpdateReverseRequest( + UpdateReverseRequest( + ip_id=ip_id, + reverse=reverse, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IP(res.json()) + + def create_failover_i_ps( + self, + *, + offer_id: int, + quantity: int, + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + ) -> CreateFailoverIPsResponse: + """ + Order failover IPs. + Order X failover IPs. + :param offer_id: Failover IP offer ID. + :param quantity: Quantity. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID. + :return: :class:`CreateFailoverIPsResponse ` + + Usage: + :: + + result = api.create_failover_i_ps( + offer_id=1, + quantity=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/failover-ips", + body=marshal_CreateFailoverIPsRequest( + CreateFailoverIPsRequest( + offer_id=offer_id, + quantity=quantity, + zone=zone, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_CreateFailoverIPsResponse(res.json()) + + def attach_failover_i_ps( + self, + *, + server_id: int, + fips_ids: List[int], + zone: Optional[Zone] = None, + ) -> None: + """ + Attach failovers on baremetal server. + Attach failovers on the server associated with the given ID. + :param server_id: ID of the server. + :param fips_ids: List of ID of failovers IP to attach. + :param zone: Zone to target. 
If none is passed will use default zone from the config.
+
+        Usage:
+        ::
+
+            result = api.attach_failover_i_ps(
+                server_id=1,
+                fips_ids=[],
+            )
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/zones/{param_zone}/failover-ips/attach",
+            body=marshal_AttachFailoverIPsRequest(
+                AttachFailoverIPsRequest(
+                    server_id=server_id,
+                    fips_ids=fips_ids,
+                    zone=zone,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+
+    def detach_failover_i_ps(
+        self,
+        *,
+        fips_ids: List[int],
+        zone: Optional[Zone] = None,
+    ) -> None:
+        """
+        Detach failover IPs from a baremetal server.
+        Detach the failover IPs associated with the given IDs.
+        :param fips_ids: List of IDs of the failover IPs to detach.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+
+        Usage:
+        ::
+
+            result = api.detach_failover_i_ps(
+                fips_ids=[],
+            )
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/zones/{param_zone}/failover-ips/detach",
+            body=marshal_DetachFailoverIPsRequest(
+                DetachFailoverIPsRequest(
+                    fips_ids=fips_ids,
+                    zone=zone,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+
+    def attach_failover_ip_to_mac_address(
+        self,
+        *,
+        ip_id: int,
+        zone: Optional[Zone] = None,
+        type_: Optional[AttachFailoverIPToMacAddressRequestMacType] = None,
+        mac: Optional[str] = None,
+    ) -> IP:
+        """
+        Attach a failover IP to a MAC address.
+        :param ip_id: ID of the failover IP.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+        :param type_: A MAC type.
+        :param mac: A valid MAC address (existing or not).
+        :return: :class:`IP `
+
+        Usage:
+        ::
+
+            result = api.attach_failover_ip_to_mac_address(
+                ip_id=1,
+            )
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+        param_ip_id = validate_path_param("ip_id", ip_id)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}/attach-to-mac-address",
+            body=marshal_AttachFailoverIPToMacAddressRequest(
+                AttachFailoverIPToMacAddressRequest(
+                    ip_id=ip_id,
+                    zone=zone,
+                    type_=type_,
+                    mac=mac,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_IP(res.json())
+
+    def detach_failover_ip_from_mac_address(
+        self,
+        *,
+        ip_id: int,
+        zone: Optional[Zone] = None,
+    ) -> IP:
+        """
+        Detach a failover IP from a MAC address.
+        :param ip_id: ID of the failover IP.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+        :return: :class:`IP `
+
+        Usage:
+        ::
+
+            result = api.detach_failover_ip_from_mac_address(
+                ip_id=1,
+            )
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+        param_ip_id = validate_path_param("ip_id", ip_id)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}/detach-from-mac-address",
+            body={},
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_IP(res.json())
+
+    def delete_failover_ip(
+        self,
+        *,
+        ip_id: int,
+        zone: Optional[Zone] = None,
+    ) -> None:
+        """
+        Delete a failover IP.
+        Delete the failover IP associated with the given ID.
+        :param ip_id: ID of the failover IP to delete.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
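+
+        A possible failover IP lifecycle, shown only as an illustration (all
+        IDs below are placeholders)::
+
+            created = api.create_failover_i_ps(offer_id=1, quantity=1)
+            api.attach_failover_i_ps(server_id=1, fips_ids=[1])
+            api.detach_failover_i_ps(fips_ids=[1])
+            api.delete_failover_ip(ip_id=1)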
+
+        Usage:
+        ::
+
+            result = api.delete_failover_ip(
+                ip_id=1,
+            )
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+        param_ip_id = validate_path_param("ip_id", ip_id)
+
+        res = self._request(
+            "DELETE",
+            f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}",
+        )
+
+        self._throw_on_error(res)
+
+    def list_failover_i_ps(
+        self,
+        *,
+        zone: Optional[Zone] = None,
+        page: Optional[int] = None,
+        page_size: Optional[int] = None,
+        order_by: Optional[ListFailoverIPsRequestOrderBy] = None,
+        project_id: Optional[str] = None,
+        search: Optional[str] = None,
+        only_available: Optional[bool] = None,
+    ) -> ListFailoverIPsResponse:
+        """
+        List failover IPs for a project.
+        List the failover IPs belonging to the given project.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+        :param page: Page number.
+        :param page_size: Number of failover IPs per page.
+        :param order_by: Order of the failover IPs.
+        :param project_id: Filter failover IPs by project ID.
+        :param search: Filter failover IPs matching this field.
+        :param only_available: If true, return only failover IPs not attached to a server;
+        if false, return only failover IPs attached to a server.
+        :return: :class:`ListFailoverIPsResponse `
+
+        Usage:
+        ::
+
+            result = api.list_failover_i_ps()
+        """
+
+        param_zone = validate_path_param("zone", zone or self.client.default_zone)
+
+        res = self._request(
+            "GET",
+            f"/dedibox/v1/zones/{param_zone}/failover-ips",
+            params={
+                "only_available": only_available,
+                "order_by": order_by,
+                "page": page,
+                "page_size": page_size or self.client.default_page_size,
+                "project_id": project_id or self.client.default_project_id,
+                "search": search,
+            },
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_ListFailoverIPsResponse(res.json())
+
+    def list_failover_i_ps_all(
+        self,
+        *,
+        zone: Optional[Zone] = None,
+        page: Optional[int] = None,
+        page_size: Optional[int] = None,
+        order_by: Optional[ListFailoverIPsRequestOrderBy] = None,
+        project_id: Optional[str] = None,
+        search: Optional[str] = None,
+        only_available: Optional[bool] = None,
+    ) -> List[FailoverIP]:
+        """
+        List failover IPs for a project.
+        List the failover IPs belonging to the given project.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+        :param page: Page number.
+        :param page_size: Number of failover IPs per page.
+        :param order_by: Order of the failover IPs.
+        :param project_id: Filter failover IPs by project ID.
+        :param search: Filter failover IPs matching this field.
+        :param only_available: If true, return only failover IPs not attached to a server;
+        if false, return only failover IPs attached to a server.
+        :return: :class:`List[FailoverIP] `
+
+        Usage:
+        ::
+
+            result = api.list_failover_i_ps_all()
+        """
+
+        return fetch_all_pages(
+            type=ListFailoverIPsResponse,
+            key="failover_ips",
+            fetcher=self.list_failover_i_ps,
+            args={
+                "zone": zone,
+                "page": page,
+                "page_size": page_size,
+                "order_by": order_by,
+                "project_id": project_id,
+                "search": search,
+                "only_available": only_available,
+            },
+        )
+
+    def get_failover_ip(
+        self,
+        *,
+        ip_id: int,
+        zone: Optional[Zone] = None,
+    ) -> FailoverIP:
+        """
+        Get a specific failover IP.
+        Get the failover IP associated with the given ID.
+        :param ip_id: ID of the failover IP.
+        :param zone: Zone to target. If none is passed will use default zone from the config.
+ :return: :class:`FailoverIP ` + + Usage: + :: + + result = api.get_failover_ip( + ip_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_ip_id = validate_path_param("ip_id", ip_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/failover-ips/{param_ip_id}", + ) + + self._throw_on_error(res) + return unmarshal_FailoverIP(res.json()) + + def get_remaining_quota( + self, + *, + zone: Optional[Zone] = None, + project_id: Optional[str] = None, + ) -> GetRemainingQuotaResponse: + """ + Get remaining quota. + :param zone: Zone to target. If none is passed will use default zone from the config. + :param project_id: Project ID. + :return: :class:`GetRemainingQuotaResponse ` + + Usage: + :: + + result = api.get_remaining_quota() + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/remaining-quota", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GetRemainingQuotaResponse(res.json()) + + def get_raid( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Raid: + """ + Get raid. + Return raid for the given server ID. + :param server_id: ID of the server. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Raid ` + + Usage: + :: + + result = api.get_raid( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/raid", + ) + + self._throw_on_error(res) + return unmarshal_Raid(res.json()) + + def update_raid( + self, + *, + server_id: int, + raid_arrays: List[UpdatableRaidArray], + zone: Optional[Zone] = None, + ) -> None: + """ + Update RAID. + Update RAID associated with the given server ID. + :param server_id: ID of the server. + :param raid_arrays: RAIDs to update. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = api.update_raid( + server_id=1, + raid_arrays=[], + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/update-raid", + body=marshal_UpdateRaidRequest( + UpdateRaidRequest( + server_id=server_id, + raid_arrays=raid_arrays, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def start_rescue( + self, + *, + server_id: int, + os_id: int, + zone: Optional[Zone] = None, + ) -> Rescue: + """ + Start in rescue baremetal server. + Start in rescue the server associated with the given ID. + :param server_id: ID of the server to start rescue. + :param os_id: OS ID to use to start rescue. + :param zone: Zone to target. If none is passed will use default zone from the config. 
+ :return: :class:`Rescue ` + + Usage: + :: + + result = api.start_rescue( + server_id=1, + os_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "POST", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/rescue", + body=marshal_StartRescueRequest( + StartRescueRequest( + server_id=server_id, + os_id=os_id, + zone=zone, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Rescue(res.json()) + + def get_rescue( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> Rescue: + """ + Get rescue information. + Return rescue information for the given server ID. + :param server_id: ID of the server to get rescue. + :param zone: Zone to target. If none is passed will use default zone from the config. + :return: :class:`Rescue ` + + Usage: + :: + + result = api.get_rescue( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "GET", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/rescue", + ) + + self._throw_on_error(res) + return unmarshal_Rescue(res.json()) + + def stop_rescue( + self, + *, + server_id: int, + zone: Optional[Zone] = None, + ) -> None: + """ + Stop rescue on baremetal server. + Stop rescue on the server associated with the given ID. + :param server_id: ID of the server to stop rescue. + :param zone: Zone to target. If none is passed will use default zone from the config. + + Usage: + :: + + result = api.stop_rescue( + server_id=1, + ) + """ + + param_zone = validate_path_param("zone", zone or self.client.default_zone) + param_server_id = validate_path_param("server_id", server_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/zones/{param_zone}/servers/{param_server_id}/rescue", + ) + + self._throw_on_error(res) + + +class DediboxV1BillingAPI(API): + """ + Dedibox Phoenix Billing API. 
+ """ + + def list_invoices( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListInvoicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListInvoicesResponse: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`ListInvoicesResponse ` + + Usage: + :: + + result = api.list_invoices() + """ + + res = self._request( + "GET", + "/dedibox/v1/invoices", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListInvoicesResponse(res.json()) + + def list_invoices_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListInvoicesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[InvoiceSummary]: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`List[InvoiceSummary] ` + + Usage: + :: + + result = api.list_invoices_all() + """ + + return fetch_all_pages( + type=ListInvoicesResponse, + key="invoices", + fetcher=self.list_invoices, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def get_invoice( + self, + *, + invoice_id: int, + ) -> Invoice: + """ + :param invoice_id: + :return: :class:`Invoice ` + + Usage: + :: + + result = api.get_invoice( + invoice_id=1, + ) + """ + + param_invoice_id = validate_path_param("invoice_id", invoice_id) + + res = self._request( + "GET", + f"/dedibox/v1/invoices/{param_invoice_id}", + ) + + self._throw_on_error(res) + return unmarshal_Invoice(res.json()) + + def download_invoice( + self, + *, + invoice_id: int, + ) -> ScwFile: + """ + :param invoice_id: + :return: :class:`ScwFile ` + + Usage: + :: + + result = api.download_invoice( + invoice_id=1, + ) + """ + + param_invoice_id = validate_path_param("invoice_id", invoice_id) + + res = self._request( + "GET", + f"/dedibox/v1/invoices/{param_invoice_id}/download", + ) + + self._throw_on_error(res) + return unmarshal_ScwFile(res.json()) + + def list_refunds( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRefundsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRefundsResponse: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`ListRefundsResponse ` + + Usage: + :: + + result = api.list_refunds() + """ + + res = self._request( + "GET", + "/dedibox/v1/refunds", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRefundsResponse(res.json()) + + def list_refunds_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRefundsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RefundSummary]: + """ + :param page: + :param page_size: + :param order_by: + :param project_id: + :return: :class:`List[RefundSummary] ` + + Usage: + :: + + result = api.list_refunds_all() + """ + + return fetch_all_pages( + type=ListRefundsResponse, + key="refunds", + fetcher=self.list_refunds, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + 
def get_refund( + self, + *, + refund_id: int, + ) -> Refund: + """ + :param refund_id: + :return: :class:`Refund ` + + Usage: + :: + + result = api.get_refund( + refund_id=1, + ) + """ + + param_refund_id = validate_path_param("refund_id", refund_id) + + res = self._request( + "GET", + f"/dedibox/v1/refunds/{param_refund_id}", + ) + + self._throw_on_error(res) + return unmarshal_Refund(res.json()) + + def download_refund( + self, + *, + refund_id: int, + ) -> ScwFile: + """ + :param refund_id: + :return: :class:`ScwFile ` + + Usage: + :: + + result = api.download_refund( + refund_id=1, + ) + """ + + param_refund_id = validate_path_param("refund_id", refund_id) + + res = self._request( + "GET", + f"/dedibox/v1/refunds/{param_refund_id}/download", + ) + + self._throw_on_error(res) + return unmarshal_ScwFile(res.json()) + + def can_order( + self, + *, + project_id: Optional[str] = None, + ) -> CanOrderResponse: + """ + :param project_id: + :return: :class:`CanOrderResponse ` + + Usage: + :: + + result = api.can_order() + """ + + res = self._request( + "GET", + "/dedibox/v1/can-order", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_CanOrderResponse(res.json()) + + +class DediboxV1IPv6BlockAPI(API): + """ + Dedibox Phoenix IPv6 Block API. + """ + + def get_i_pv6_block_quotas( + self, + *, + project_id: Optional[str] = None, + ) -> GetIPv6BlockQuotasResponse: + """ + Get IPv6 block quota. + Get IPv6 block quota with the given project ID. + /48 one per organization. + /56 link to your number of server. + /64 link to your number of failover IP. + :param project_id: ID of the project. + :return: :class:`GetIPv6BlockQuotasResponse ` + + Usage: + :: + + result = api.get_i_pv6_block_quotas() + """ + + res = self._request( + "GET", + "/dedibox/v1/ipv6-block-quotas", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GetIPv6BlockQuotasResponse(res.json()) + + def create_i_pv6_block( + self, + *, + project_id: Optional[str] = None, + ) -> IPv6Block: + """ + Create IPv6 block for baremetal server. + Create IPv6 block associated with the given project ID. + :param project_id: ID of the project. + :return: :class:`IPv6Block ` + + Usage: + :: + + result = api.create_i_pv6_block() + """ + + res = self._request( + "POST", + "/dedibox/v1/ipv6-block", + body=marshal_IPv6BlockApiCreateIPv6BlockRequest( + IPv6BlockApiCreateIPv6BlockRequest( + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + def get_i_pv6_block( + self, + *, + project_id: Optional[str] = None, + ) -> IPv6Block: + """ + Get a specific IPv6 block. + Get the IPv6 block associated with the given ID. + :param project_id: ID of the project. + :return: :class:`IPv6Block ` + + Usage: + :: + + result = api.get_i_pv6_block() + """ + + res = self._request( + "GET", + "/dedibox/v1/ipv6-block", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + def update_i_pv6_block( + self, + *, + block_id: int, + nameservers: Optional[List[str]] = None, + ) -> IPv6Block: + """ + Update IPv6 block. + Update DNS associated to IPv6 block. + If DNS is used, minimum of 2 is necessary and maximum of 5 (no duplicate). + :param block_id: ID of the IPv6 block. + :param nameservers: DNS to link to the IPv6. 
+ :return: :class:`IPv6Block ` + + Usage: + :: + + result = api.update_i_pv6_block( + block_id=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/ipv6-blocks/{param_block_id}", + body=marshal_IPv6BlockApiUpdateIPv6BlockRequest( + IPv6BlockApiUpdateIPv6BlockRequest( + block_id=block_id, + nameservers=nameservers, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + def delete_i_pv6_block( + self, + *, + block_id: int, + ) -> None: + """ + Delete IPv6 block. + Delete IPv6 block subnet with the given ID. + :param block_id: ID of the IPv6 block to delete. + + Usage: + :: + + result = api.delete_i_pv6_block( + block_id=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/ipv6-blocks/{param_block_id}", + ) + + self._throw_on_error(res) + + def create_i_pv6_block_subnet( + self, + *, + block_id: int, + address: str, + cidr: int, + ) -> IPv6Block: + """ + Create IPv6 block subnet. + Create IPv6 block subnet for the given IP ID. + /48 could create subnet in /56 (quota link to your number of server). + /56 could create subnet in /64 (quota link to your number of failover IP). + :param block_id: ID of the IPv6 block. + :param address: Address of the IPv6. + :param cidr: Classless InterDomain Routing notation of the IPv6. + :return: :class:`IPv6Block ` + + Usage: + :: + + result = api.create_i_pv6_block_subnet( + block_id=1, + address="example", + cidr=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "POST", + f"/dedibox/v1/ipv6-blocks/{param_block_id}/subnets", + body=marshal_IPv6BlockApiCreateIPv6BlockSubnetRequest( + IPv6BlockApiCreateIPv6BlockSubnetRequest( + block_id=block_id, + address=address, + cidr=cidr, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_IPv6Block(res.json()) + + def list_i_pv6_block_subnets_available( + self, + *, + block_id: int, + ) -> ListIPv6BlockSubnetsAvailableResponse: + """ + List available IPv6 block subnets. + List all available IPv6 block subnets for given IP ID. + :param block_id: ID of the IPv6 block. + :return: :class:`ListIPv6BlockSubnetsAvailableResponse ` + + Usage: + :: + + result = api.list_i_pv6_block_subnets_available( + block_id=1, + ) + """ + + param_block_id = validate_path_param("block_id", block_id) + + res = self._request( + "GET", + f"/dedibox/v1/ipv6-blocks/{param_block_id}/subnets", + ) + + self._throw_on_error(res) + return unmarshal_ListIPv6BlockSubnetsAvailableResponse(res.json()) + + +class DediboxV1RpnAPI(API): + """ + Dedibox Phoenix RPN API. + """ + + def list_rpn_server_capabilities( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnServerCapabilitiesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnServerCapabilitiesResponse: + """ + :param page: Page number. + :param page_size: Number of servers per page. + :param order_by: Order of the servers. + :param project_id: Filter servers by project ID. 
+ :return: :class:`ListRpnServerCapabilitiesResponse ` + + Usage: + :: + + result = api.list_rpn_server_capabilities() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpn/server-capabilities", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnServerCapabilitiesResponse(res.json()) + + def list_rpn_server_capabilities_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnServerCapabilitiesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnServerCapability]: + """ + :param page: Page number. + :param page_size: Number of servers per page. + :param order_by: Order of the servers. + :param project_id: Filter servers by project ID. + :return: :class:`List[RpnServerCapability] ` + + Usage: + :: + + result = api.list_rpn_server_capabilities_all() + """ + + return fetch_all_pages( + type=ListRpnServerCapabilitiesResponse, + key="servers", + fetcher=self.list_rpn_server_capabilities, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def get_rpn_status( + self, + *, + project_id: Optional[str] = None, + rpnv1_group_id: Optional[int] = None, + rpnv2_group_id: Optional[int] = None, + ) -> GetRpnStatusResponse: + """ + :param project_id: A project ID. + :param rpnv1_group_id: An RPN v1 group ID. + :param rpnv2_group_id: An RPN v2 group ID. + :return: :class:`GetRpnStatusResponse ` + + Usage: + :: + + result = api.get_rpn_status() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpn/status", + params={ + "project_id": project_id or self.client.default_project_id, + "rpnv1_group_id": rpnv1_group_id, + "rpnv2_group_id": rpnv2_group_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GetRpnStatusResponse(res.json()) + + +class DediboxV1RpnSanAPI(API): + """ + Dedibox Phoenix RPN SAN API. + """ + + def list_rpn_sans( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnSansRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnSansResponse: + """ + :param page: Page number. + :param page_size: Number of RPN SANs per page. + :param order_by: Order of the RPN SANs. + :param project_id: Filter RPN SANs by project ID. + :return: :class:`ListRpnSansResponse ` + + Usage: + :: + + result = api.list_rpn_sans() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpn-sans", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnSansResponse(res.json()) + + def list_rpn_sans_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnSansRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnSanSummary]: + """ + :param page: Page number. + :param page_size: Number of RPN SANs per page. + :param order_by: Order of the RPN SANs. + :param project_id: Filter RPN SANs by project ID. 
+        :return: :class:`List[RpnSanSummary] `
+
+        Usage:
+        ::
+
+            result = api.list_rpn_sans_all()
+        """
+
+        return fetch_all_pages(
+            type=ListRpnSansResponse,
+            key="rpn_sans",
+            fetcher=self.list_rpn_sans,
+            args={
+                "page": page,
+                "page_size": page_size,
+                "order_by": order_by,
+                "project_id": project_id,
+            },
+        )
+
+    def get_rpn_san(
+        self,
+        *,
+        rpn_san_id: int,
+    ) -> RpnSan:
+        """
+        :param rpn_san_id: RPN SAN ID.
+        :return: :class:`RpnSan `
+
+        Usage:
+        ::
+
+            result = api.get_rpn_san(
+                rpn_san_id=1,
+            )
+        """
+
+        param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id)
+
+        res = self._request(
+            "GET",
+            f"/dedibox/v1/rpn-sans/{param_rpn_san_id}",
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_RpnSan(res.json())
+
+    def wait_for_rpn_san(
+        self,
+        *,
+        rpn_san_id: int,
+        options: Optional[WaitForOptions[RpnSan, bool]] = None,
+    ) -> RpnSan:
+        """
+        Wait for an RPN SAN to reach a stable (non-transient) status.
+        :param rpn_san_id: RPN SAN ID.
+        :param options: The options for the waiter.
+        :return: :class:`RpnSan `
+
+        Usage:
+        ::
+
+            result = api.wait_for_rpn_san(
+                rpn_san_id=1,
+            )
+        """
+
+        if not options:
+            options = WaitForOptions()
+
+        if not options.stop:
+            options.stop = lambda res: res.status not in RPN_SAN_TRANSIENT_STATUSES
+
+        return wait_for_resource(
+            fetcher=self.get_rpn_san,
+            options=options,
+            args={
+                "rpn_san_id": rpn_san_id,
+            },
+        )
+
+    def delete_rpn_san(
+        self,
+        *,
+        rpn_san_id: int,
+    ) -> None:
+        """
+        :param rpn_san_id: RPN SAN ID.
+
+        Usage:
+        ::
+
+            result = api.delete_rpn_san(
+                rpn_san_id=1,
+            )
+        """
+
+        param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id)
+
+        res = self._request(
+            "DELETE",
+            f"/dedibox/v1/rpn-sans/{param_rpn_san_id}",
+        )
+
+        self._throw_on_error(res)
+
+    def create_rpn_san(
+        self,
+        *,
+        offer_id: int,
+        project_id: Optional[str] = None,
+    ) -> Service:
+        """
+        :param offer_id: Offer ID.
+        :param project_id: Your project ID.
+        :return: :class:`Service `
+
+        Usage:
+        ::
+
+            result = api.create_rpn_san(
+                offer_id=1,
+            )
+        """
+
+        res = self._request(
+            "POST",
+            "/dedibox/v1/rpn-sans",
+            body=marshal_RpnSanApiCreateRpnSanRequest(
+                RpnSanApiCreateRpnSanRequest(
+                    offer_id=offer_id,
+                    project_id=project_id,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_Service(res.json())
+
+    def list_ips(
+        self,
+        *,
+        rpn_san_id: int,
+        type_: Optional[RpnSanIpType] = None,
+    ) -> ListIpsResponse:
+        """
+        :param rpn_san_id: RPN SAN ID.
+        :param type_: Filter by IP type (server | rpnv2_subnet).
+        :return: :class:`ListIpsResponse `
+
+        Usage:
+        ::
+
+            result = api.list_ips(
+                rpn_san_id=1,
+            )
+        """
+
+        param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id)
+
+        res = self._request(
+            "GET",
+            f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/ips",
+            params={
+                "type": type_,
+            },
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_ListIpsResponse(res.json())
+
+    def add_ip(
+        self,
+        *,
+        rpn_san_id: int,
+        ip_ids: List[int],
+    ) -> None:
+        """
+        :param rpn_san_id: RPN SAN ID.
+        :param ip_ids: An array of IP IDs.
+
+        Usage:
+        ::
+
+            result = api.add_ip(
+                rpn_san_id=1,
+                ip_ids=[],
+            )
+        """
+
+        param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/ips",
+            body=marshal_RpnSanApiAddIpRequest(
+                RpnSanApiAddIpRequest(
+                    rpn_san_id=rpn_san_id,
+                    ip_ids=ip_ids,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+
+    def remove_ip(
+        self,
+        *,
+        rpn_san_id: int,
+        ip_ids: List[int],
+    ) -> None:
+        """
+        :param rpn_san_id: RPN SAN ID.
+        :param ip_ids: An array of IP IDs.
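+
+        A possible sequence for managing the IPs allowed on an RPN SAN, shown
+        only as an illustration (the IDs below are placeholders)::
+
+            api.list_available_ips(rpn_san_id=1)
+            api.add_ip(rpn_san_id=1, ip_ids=[1])
+            api.list_ips(rpn_san_id=1)
+            api.remove_ip(rpn_san_id=1, ip_ids=[1])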
+ + Usage: + :: + + result = api.remove_ip( + rpn_san_id=1, + ip_ids=[], + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/ips", + body=marshal_RpnSanApiRemoveIpRequest( + RpnSanApiRemoveIpRequest( + rpn_san_id=rpn_san_id, + ip_ids=ip_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def list_available_ips( + self, + *, + rpn_san_id: int, + type_: Optional[RpnSanIpType] = None, + ) -> ListIpsResponse: + """ + :param rpn_san_id: RPN SAN ID. + :param type_: Filter by IP type (server | rpnv2_subnet). + :return: :class:`ListIpsResponse ` + + Usage: + :: + + result = api.list_available_ips( + rpn_san_id=1, + ) + """ + + param_rpn_san_id = validate_path_param("rpn_san_id", rpn_san_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpn-sans/{param_rpn_san_id}/available-ips", + params={ + "type": type_, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListIpsResponse(res.json()) + + +class DediboxV1RpnV1API(API): + """ + Dedibox Phoenix RPN v1 API. + """ + + def list_rpn_groups( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnGroupsResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v1 groups per page. + :param order_by: Order of the rpn v1 groups. + :param project_id: Filter rpn v1 groups by project ID. + :return: :class:`ListRpnGroupsResponse ` + + Usage: + :: + + result = api.list_rpn_groups() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/groups", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnGroupsResponse(res.json()) + + def list_rpn_groups_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnGroup]: + """ + :param page: Page number. + :param page_size: Number of rpn v1 groups per page. + :param order_by: Order of the rpn v1 groups. + :param project_id: Filter rpn v1 groups by project ID. + :return: :class:`List[RpnGroup] ` + + Usage: + :: + + result = api.list_rpn_groups_all() + """ + + return fetch_all_pages( + type=ListRpnGroupsResponse, + key="rpn_groups", + fetcher=self.list_rpn_groups, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def get_rpn_group( + self, + *, + group_id: int, + ) -> RpnGroup: + """ + :param group_id: Rpn v1 group ID. + :return: :class:`RpnGroup ` + + Usage: + :: + + result = api.get_rpn_group( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv1/groups/{param_group_id}", + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + def create_rpn_group( + self, + *, + name: str, + server_ids: Optional[List[int]] = None, + san_server_ids: Optional[List[int]] = None, + project_id: Optional[str] = None, + ) -> RpnGroup: + """ + :param name: Rpn v1 group name. + :param server_ids: A collection of rpn v1 capable servers. + :param san_server_ids: A collection of rpn v1 capable rpn sans servers. + :param project_id: A project ID. 
+ :return: :class:`RpnGroup ` + + Usage: + :: + + result = api.create_rpn_group( + name="example", + ) + """ + + res = self._request( + "POST", + "/dedibox/v1/rpnv1/groups", + body=marshal_RpnV1ApiCreateRpnGroupRequest( + RpnV1ApiCreateRpnGroupRequest( + name=name, + server_ids=server_ids, + san_server_ids=san_server_ids, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + def delete_rpn_group( + self, + *, + group_id: int, + ) -> None: + """ + :param group_id: Rpn v1 group ID. + + Usage: + :: + + result = api.delete_rpn_group( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpnv1/groups/{param_group_id}", + ) + + self._throw_on_error(res) + + def update_rpn_group_name( + self, + *, + group_id: int, + name: Optional[str] = None, + ) -> RpnGroup: + """ + :param group_id: Rpn v1 group ID. + :param name: New rpn v1 group name. + :return: :class:`RpnGroup ` + + Usage: + :: + + result = api.update_rpn_group_name( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "PATCH", + f"/dedibox/v1/rpnv1/groups/{param_group_id}", + body=marshal_RpnV1ApiUpdateRpnGroupNameRequest( + RpnV1ApiUpdateRpnGroupNameRequest( + group_id=group_id, + name=name, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + def list_rpn_group_members( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupMembersRequestOrderBy] = None, + group_id: int, + project_id: Optional[str] = None, + ) -> ListRpnGroupMembersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v1 group members per page. + :param order_by: Order of the rpn v1 group members. + :param group_id: Filter rpn v1 group members by group ID. + :param project_id: A project ID. + :return: :class:`ListRpnGroupMembersResponse ` + + Usage: + :: + + result = api.list_rpn_group_members( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/members", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnGroupMembersResponse(res.json()) + + def list_rpn_group_members_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnGroupMembersRequestOrderBy] = None, + group_id: int, + project_id: Optional[str] = None, + ) -> List[RpnGroupMember]: + """ + :param page: Page number. + :param page_size: Number of rpn v1 group members per page. + :param order_by: Order of the rpn v1 group members. + :param group_id: Filter rpn v1 group members by group ID. + :param project_id: A project ID. 
+ :return: :class:`List[RpnGroupMember] ` + + Usage: + :: + + result = api.list_rpn_group_members_all( + group_id=1, + ) + """ + + return fetch_all_pages( + type=ListRpnGroupMembersResponse, + key="members", + fetcher=self.list_rpn_group_members, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "group_id": group_id, + "project_id": project_id, + }, + ) + + def rpn_group_invite( + self, + *, + group_id: int, + server_ids: List[int], + project_id: Optional[str] = None, + ) -> None: + """ + :param group_id: The RPN V1 group ID. + :param server_ids: A collection of external server IDs. + :param project_id: A project ID. + + Usage: + :: + + result = api.rpn_group_invite( + group_id=1, + server_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/invite", + body=marshal_RpnV1ApiRpnGroupInviteRequest( + RpnV1ApiRpnGroupInviteRequest( + group_id=group_id, + server_ids=server_ids, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def leave_rpn_group( + self, + *, + group_id: int, + member_ids: List[int], + project_id: Optional[str] = None, + ) -> None: + """ + :param group_id: The RPN V1 group ID. + :param member_ids: A collection of rpn v1 group members IDs. + :param project_id: A project ID. + + Usage: + :: + + result = api.leave_rpn_group( + group_id=1, + member_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/leave", + body=marshal_RpnV1ApiLeaveRpnGroupRequest( + RpnV1ApiLeaveRpnGroupRequest( + group_id=group_id, + member_ids=member_ids, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def add_rpn_group_members( + self, + *, + group_id: int, + server_ids: Optional[List[int]] = None, + san_server_ids: Optional[List[int]] = None, + ) -> RpnGroup: + """ + :param group_id: The rpn v1 group ID. + :param server_ids: A collection of rpn v1 capable server IDs. + :param san_server_ids: A collection of rpn v1 capable RPN SAN server IDs. + :return: :class:`RpnGroup ` + + Usage: + :: + + result = api.add_rpn_group_members( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/members", + body=marshal_RpnV1ApiAddRpnGroupMembersRequest( + RpnV1ApiAddRpnGroupMembersRequest( + group_id=group_id, + server_ids=server_ids, + san_server_ids=san_server_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + def delete_rpn_group_members( + self, + *, + group_id: int, + member_ids: List[int], + ) -> RpnGroup: + """ + :param group_id: The rpn v1 group ID. + :param member_ids: A collection of rpn v1 group members IDs. 
+ :return: :class:`RpnGroup ` + + Usage: + :: + + result = api.delete_rpn_group_members( + group_id=1, + member_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpnv1/groups/{param_group_id}/members", + body=marshal_RpnV1ApiDeleteRpnGroupMembersRequest( + RpnV1ApiDeleteRpnGroupMembersRequest( + group_id=group_id, + member_ids=member_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_RpnGroup(res.json()) + + def list_rpn_capable_servers( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnCapableServersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`ListRpnCapableServersResponse ` + + Usage: + :: + + result = api.list_rpn_capable_servers() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/capable-servers", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnCapableServersResponse(res.json()) + + def list_rpn_capable_servers_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[Server]: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`List[Server] ` + + Usage: + :: + + result = api.list_rpn_capable_servers_all() + """ + + return fetch_all_pages( + type=ListRpnCapableServersResponse, + key="servers", + fetcher=self.list_rpn_capable_servers, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def list_rpn_capable_san_servers( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableSanServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnCapableSanServersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`ListRpnCapableSanServersResponse ` + + Usage: + :: + + result = api.list_rpn_capable_san_servers() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/capable-san-servers", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnCapableSanServersResponse(res.json()) + + def list_rpn_capable_san_servers_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnCapableSanServersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnSanServer]: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. 
+ :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`List[RpnSanServer] ` + + Usage: + :: + + result = api.list_rpn_capable_san_servers_all() + """ + + return fetch_all_pages( + type=ListRpnCapableSanServersResponse, + key="san_servers", + fetcher=self.list_rpn_capable_san_servers, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def list_rpn_invites( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnInvitesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnInvitesResponse: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`ListRpnInvitesResponse ` + + Usage: + :: + + result = api.list_rpn_invites() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv1/invites", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnInvitesResponse(res.json()) + + def list_rpn_invites_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnInvitesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnGroupMember]: + """ + :param page: Page number. + :param page_size: Number of rpn capable resources per page. + :param order_by: Order of the rpn capable resources. + :param project_id: Filter rpn capable resources by project ID. + :return: :class:`List[RpnGroupMember] ` + + Usage: + :: + + result = api.list_rpn_invites_all() + """ + + return fetch_all_pages( + type=ListRpnInvitesResponse, + key="members", + fetcher=self.list_rpn_invites, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def accept_rpn_invite( + self, + *, + member_id: int, + ) -> None: + """ + :param member_id: The member ID. + + Usage: + :: + + result = api.accept_rpn_invite( + member_id=1, + ) + """ + + param_member_id = validate_path_param("member_id", member_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/invites/{param_member_id}/accept", + ) + + self._throw_on_error(res) + + def refuse_rpn_invite( + self, + *, + member_id: int, + ) -> None: + """ + :param member_id: The member ID. + + Usage: + :: + + result = api.refuse_rpn_invite( + member_id=1, + ) + """ + + param_member_id = validate_path_param("member_id", member_id) + + res = self._request( + "POST", + f"/dedibox/v1/rpnv1/invites/{param_member_id}/refuse", + ) + + self._throw_on_error(res) + + +class DediboxV1RpnV2API(API): + """ + Dedibox Phoenix RPN v2 API. + """ + + def list_rpn_v2_groups( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnV2GroupsResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 groups per page. + :param order_by: Order of the rpn v2 groups. + :param project_id: Filter rpn v2 groups by project ID. 
+ :return: :class:`ListRpnV2GroupsResponse ` + + Usage: + :: + + result = api.list_rpn_v2_groups() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv2/groups", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2GroupsResponse(res.json()) + + def list_rpn_v2_groups_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupsRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[RpnV2Group]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 groups per page. + :param order_by: Order of the rpn v2 groups. + :param project_id: Filter rpn v2 groups by project ID. + :return: :class:`List[RpnV2Group] ` + + Usage: + :: + + result = api.list_rpn_v2_groups_all() + """ + + return fetch_all_pages( + type=ListRpnV2GroupsResponse, + key="rpn_groups", + fetcher=self.list_rpn_v2_groups, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def list_rpn_v2_members( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2MembersRequestOrderBy] = None, + group_id: int, + type_: Optional[ListRpnV2MembersRequestType] = None, + ) -> ListRpnV2MembersResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group members per page. + :param order_by: Order of the rpn v2 group members. + :param group_id: RPN V2 group ID. + :param type_: Filter members by type. + :return: :class:`ListRpnV2MembersResponse ` + + Usage: + :: + + result = api.list_rpn_v2_members( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/members", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "type": type_, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2MembersResponse(res.json()) + + def list_rpn_v2_members_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2MembersRequestOrderBy] = None, + group_id: int, + type_: Optional[ListRpnV2MembersRequestType] = None, + ) -> List[RpnV2Member]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group members per page. + :param order_by: Order of the rpn v2 group members. + :param group_id: RPN V2 group ID. + :param type_: Filter members by type. + :return: :class:`List[RpnV2Member] ` + + Usage: + :: + + result = api.list_rpn_v2_members_all( + group_id=1, + ) + """ + + return fetch_all_pages( + type=ListRpnV2MembersResponse, + key="members", + fetcher=self.list_rpn_v2_members, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "group_id": group_id, + "type_": type_, + }, + ) + + def get_rpn_v2_group( + self, + *, + group_id: int, + ) -> RpnV2Group: + """ + :param group_id: RPN V2 group ID. 
+        :return: :class:`RpnV2Group `
+
+        Usage:
+        ::
+
+            result = api.get_rpn_v2_group(
+                group_id=1,
+            )
+        """
+
+        param_group_id = validate_path_param("group_id", group_id)
+
+        res = self._request(
+            "GET",
+            f"/dedibox/v1/rpnv2/groups/{param_group_id}",
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_RpnV2Group(res.json())
+
+    def wait_for_rpn_v2_group(
+        self,
+        *,
+        group_id: int,
+        options: Optional[WaitForOptions[RpnV2Group, bool]] = None,
+    ) -> RpnV2Group:
+        """
+        Wait for an RPN V2 group to reach a stable (non-transient) status.
+        :param group_id: RPN V2 group ID.
+        :param options: The options for the waiter.
+        :return: :class:`RpnV2Group `
+
+        Usage:
+        ::
+
+            result = api.wait_for_rpn_v2_group(
+                group_id=1,
+            )
+        """
+
+        if not options:
+            options = WaitForOptions()
+
+        if not options.stop:
+            options.stop = lambda res: res.status not in RPN_V2_GROUP_TRANSIENT_STATUSES
+
+        return wait_for_resource(
+            fetcher=self.get_rpn_v2_group,
+            options=options,
+            args={
+                "group_id": group_id,
+            },
+        )
+
+    def create_rpn_v2_group(
+        self,
+        *,
+        name: str,
+        servers: List[int],
+        project_id: Optional[str] = None,
+        type_: Optional[RpnV2GroupType] = None,
+    ) -> RpnV2Group:
+        """
+        :param name: RPN V2 group name.
+        :param servers: A collection of server IDs.
+        :param project_id: Project ID of the RPN V2 group.
+        :param type_: RPN V2 group type (qinq / standard).
+        :return: :class:`RpnV2Group `
+
+        Usage:
+        ::
+
+            result = api.create_rpn_v2_group(
+                name="example",
+                servers=[],
+            )
+        """
+
+        res = self._request(
+            "POST",
+            "/dedibox/v1/rpnv2/groups",
+            body=marshal_RpnV2ApiCreateRpnV2GroupRequest(
+                RpnV2ApiCreateRpnV2GroupRequest(
+                    name=name,
+                    servers=servers,
+                    project_id=project_id,
+                    type_=type_,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_RpnV2Group(res.json())
+
+    def delete_rpn_v2_group(
+        self,
+        *,
+        group_id: int,
+    ) -> None:
+        """
+        :param group_id: RPN V2 group ID.
+
+        Usage:
+        ::
+
+            result = api.delete_rpn_v2_group(
+                group_id=1,
+            )
+        """
+
+        param_group_id = validate_path_param("group_id", group_id)
+
+        res = self._request(
+            "DELETE",
+            f"/dedibox/v1/rpnv2/groups/{param_group_id}",
+        )
+
+        self._throw_on_error(res)
+
+    def update_rpn_v2_group_name(
+        self,
+        *,
+        group_id: int,
+        name: Optional[str] = None,
+    ) -> RpnV2Group:
+        """
+        :param group_id: RPN V2 group ID.
+        :param name: RPN V2 group name.
+        :return: :class:`RpnV2Group `
+
+        Usage:
+        ::
+
+            result = api.update_rpn_v2_group_name(
+                group_id=1,
+            )
+        """
+
+        param_group_id = validate_path_param("group_id", group_id)
+
+        res = self._request(
+            "PATCH",
+            f"/dedibox/v1/rpnv2/groups/{param_group_id}",
+            body=marshal_RpnV2ApiUpdateRpnV2GroupNameRequest(
+                RpnV2ApiUpdateRpnV2GroupNameRequest(
+                    group_id=group_id,
+                    name=name,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_RpnV2Group(res.json())
+
+    def add_rpn_v2_members(
+        self,
+        *,
+        group_id: int,
+        servers: List[int],
+    ) -> None:
+        """
+        :param group_id: RPN V2 group ID.
+        :param servers: A collection of server IDs.
+
+        Usage:
+        ::
+
+            result = api.add_rpn_v2_members(
+                group_id=1,
+                servers=[],
+            )
+        """
+
+        param_group_id = validate_path_param("group_id", group_id)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/rpnv2/groups/{param_group_id}/members",
+            body=marshal_RpnV2ApiAddRpnV2MembersRequest(
+                RpnV2ApiAddRpnV2MembersRequest(
+                    group_id=group_id,
+                    servers=servers,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+
+    def delete_rpn_v2_members(
+        self,
+        *,
+        group_id: int,
+        member_ids: List[int],
+    ) -> None:
+        """
+        :param group_id: RPN V2 group ID.
+ :param member_ids: A collection of member IDs. + + Usage: + :: + + result = api.delete_rpn_v2_members( + group_id=1, + member_ids=[], + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "DELETE", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/members", + body=marshal_RpnV2ApiDeleteRpnV2MembersRequest( + RpnV2ApiDeleteRpnV2MembersRequest( + group_id=group_id, + member_ids=member_ids, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def list_rpn_v2_capable_resources( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2CapableResourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListRpnV2CapableResourcesResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 capable resources per page. + :param order_by: Order of the rpn v2 capable resources. + :param project_id: Filter rpn v2 capable resources by project ID. + :return: :class:`ListRpnV2CapableResourcesResponse ` + + Usage: + :: + + result = api.list_rpn_v2_capable_resources() + """ + + res = self._request( + "GET", + "/dedibox/v1/rpnv2/groups/capable", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2CapableResourcesResponse(res.json()) + + def list_rpn_v2_capable_resources_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2CapableResourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[Server]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 capable resources per page. + :param order_by: Order of the rpn v2 capable resources. + :param project_id: Filter rpn v2 capable resources by project ID. + :return: :class:`List[Server] ` + + Usage: + :: + + result = api.list_rpn_v2_capable_resources_all() + """ + + return fetch_all_pages( + type=ListRpnV2CapableResourcesResponse, + key="servers", + fetcher=self.list_rpn_v2_capable_resources, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def list_rpn_v2_group_logs( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupLogsRequestOrderBy] = None, + group_id: int, + ) -> ListRpnV2GroupLogsResponse: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group logs per page. + :param order_by: Order of the rpn v2 group logs. + :param group_id: RPN V2 group ID. + :return: :class:`ListRpnV2GroupLogsResponse ` + + Usage: + :: + + result = api.list_rpn_v2_group_logs( + group_id=1, + ) + """ + + param_group_id = validate_path_param("group_id", group_id) + + res = self._request( + "GET", + f"/dedibox/v1/rpnv2/groups/{param_group_id}/logs", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListRpnV2GroupLogsResponse(res.json()) + + def list_rpn_v2_group_logs_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListRpnV2GroupLogsRequestOrderBy] = None, + group_id: int, + ) -> List[Log]: + """ + :param page: Page number. + :param page_size: Number of rpn v2 group logs per page. + :param order_by: Order of the rpn v2 group logs. 
+        :param group_id: RPN V2 group ID.
+        :return: :class:`List[Log] `
+
+        Usage:
+        ::
+
+            result = api.list_rpn_v2_group_logs_all(
+                group_id=1,
+            )
+        """
+
+        return fetch_all_pages(
+            type=ListRpnV2GroupLogsResponse,
+            key="logs",
+            fetcher=self.list_rpn_v2_group_logs,
+            args={
+                "page": page,
+                "page_size": page_size,
+                "order_by": order_by,
+                "group_id": group_id,
+            },
+        )
+
+    def update_rpn_v2_vlan_for_members(
+        self,
+        *,
+        group_id: int,
+        member_ids: List[int],
+        vlan: Optional[int] = None,
+    ) -> None:
+        """
+        :param group_id: RPN V2 group ID.
+        :param member_ids: RPN V2 member IDs.
+        :param vlan: Min: 0.
+        Max: 3967.
+
+        Usage:
+        ::
+
+            result = api.update_rpn_v2_vlan_for_members(
+                group_id=1,
+                member_ids=[],
+            )
+        """
+
+        param_group_id = validate_path_param("group_id", group_id)
+
+        res = self._request(
+            "PATCH",
+            f"/dedibox/v1/rpnv2/groups/{param_group_id}/vlan",
+            body=marshal_RpnV2ApiUpdateRpnV2VlanForMembersRequest(
+                RpnV2ApiUpdateRpnV2VlanForMembersRequest(
+                    group_id=group_id,
+                    member_ids=member_ids,
+                    vlan=vlan,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+
+    def enable_rpn_v2_group_compatibility(
+        self,
+        *,
+        group_id: int,
+        rpnv1_group_id: int,
+    ) -> None:
+        """
+        :param group_id: RPN V2 group ID.
+        :param rpnv1_group_id: RPN V1 group ID.
+
+        Usage:
+        ::
+
+            result = api.enable_rpn_v2_group_compatibility(
+                group_id=1,
+                rpnv1_group_id=1,
+            )
+        """
+
+        param_group_id = validate_path_param("group_id", group_id)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/rpnv2/groups/{param_group_id}/enable-compatibility",
+            body=marshal_RpnV2ApiEnableRpnV2GroupCompatibilityRequest(
+                RpnV2ApiEnableRpnV2GroupCompatibilityRequest(
+                    group_id=group_id,
+                    rpnv1_group_id=rpnv1_group_id,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+
+    def disable_rpn_v2_group_compatibility(
+        self,
+        *,
+        group_id: int,
+    ) -> None:
+        """
+        :param group_id: RPN V2 group ID.
+
+        Usage:
+        ::
+
+            result = api.disable_rpn_v2_group_compatibility(
+                group_id=1,
+            )
+        """
+
+        param_group_id = validate_path_param("group_id", group_id)
+
+        res = self._request(
+            "POST",
+            f"/dedibox/v1/rpnv2/groups/{param_group_id}/disable-compatibility",
+            body={},
+        )
+
+        self._throw_on_error(res)
diff --git a/scaleway/scaleway/dedibox/v1/content.py b/scaleway/scaleway/dedibox/v1/content.py
new file mode 100644
index 000000000..443067327
--- /dev/null
+++ b/scaleway/scaleway/dedibox/v1/content.py
@@ -0,0 +1,86 @@
+# This file was automatically generated. DO NOT EDIT.
+# If you have any remark or suggestion do not hesitate to open an issue.
+from typing import List
+
+from .types import (
+    BMCAccessStatus,
+    IPv6BlockDelegationStatus,
+    RpnGroupMemberStatus,
+    RpnSanStatus,
+    RpnV2GroupStatus,
+    RpnV2MemberStatus,
+    ServerInstallStatus,
+    ServerStatus,
+    ServiceProvisioningStatus,
+)
+
+BMC_ACCESS_TRANSIENT_STATUSES: List[BMCAccessStatus] = [
+    BMCAccessStatus.CREATING,
+    BMCAccessStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`BMCAccessStatus `.
+"""
+I_PV6_BLOCK_DELEGATION_TRANSIENT_STATUSES: List[IPv6BlockDelegationStatus] = [
+    IPv6BlockDelegationStatus.UPDATING,
+]
+"""
+Lists transient statuses of the enum :class:`IPv6BlockDelegationStatus `.
+"""
+RPN_GROUP_MEMBER_TRANSIENT_STATUSES: List[RpnGroupMemberStatus] = [
+    RpnGroupMemberStatus.CREATING,
+    RpnGroupMemberStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`RpnGroupMemberStatus `.
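+A member in one of these statuses is still being added to or removed from its RPN group and has not yet reached a stable status.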
+""" +RPN_SAN_TRANSIENT_STATUSES: List[RpnSanStatus] = [ + RpnSanStatus.CREATING, + RpnSanStatus.DELETING, +] +""" +Lists transient statutes of the enum :class:`RpnSanStatus `. +""" +RPN_V2_GROUP_TRANSIENT_STATUSES: List[RpnV2GroupStatus] = [ + RpnV2GroupStatus.CREATING, + RpnV2GroupStatus.UPDATING, + RpnV2GroupStatus.DELETING, +] +""" +Lists transient statutes of the enum :class:`RpnV2GroupStatus `. +""" +RPN_V2_MEMBER_TRANSIENT_STATUSES: List[RpnV2MemberStatus] = [ + RpnV2MemberStatus.CREATING, + RpnV2MemberStatus.UPDATING, + RpnV2MemberStatus.DELETING, +] +""" +Lists transient statutes of the enum :class:`RpnV2MemberStatus `. +""" +SERVER_INSTALL_TRANSIENT_STATUSES: List[ServerInstallStatus] = [ + ServerInstallStatus.BOOTING, + ServerInstallStatus.SETTING_UP_RAID, + ServerInstallStatus.PARTITIONING, + ServerInstallStatus.FORMATTING, + ServerInstallStatus.INSTALLING, + ServerInstallStatus.CONFIGURING, + ServerInstallStatus.CONFIGURING_BOOTLOADER, + ServerInstallStatus.REBOOTING, +] +""" +Lists transient statutes of the enum :class:`ServerInstallStatus `. +""" +SERVER_TRANSIENT_STATUSES: List[ServerStatus] = [ + ServerStatus.DELIVERING, + ServerStatus.INSTALLING, +] +""" +Lists transient statutes of the enum :class:`ServerStatus `. +""" +SERVICE_PROVISIONING_TRANSIENT_STATUSES: List[ServiceProvisioningStatus] = [ + ServiceProvisioningStatus.DELIVERING, + ServiceProvisioningStatus.EXPIRING, +] +""" +Lists transient statutes of the enum :class:`ServiceProvisioningStatus `. +""" diff --git a/scaleway/scaleway/dedibox/v1/marshalling.py b/scaleway/scaleway/dedibox/v1/marshalling.py new file mode 100644 index 000000000..4452a9a73 --- /dev/null +++ b/scaleway/scaleway/dedibox/v1/marshalling.py @@ -0,0 +1,3523 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+ +from typing import Any, Dict +from dateutil import parser + +from scaleway_core.profile import ProfileDefaults +from scaleway_core.bridge import ( + unmarshal_Money, +) +from .types import ( + OfferServerInfoStock, + PartitionFileSystem, + IP, + CPU, + Disk, + Memory, + PersistentMemory, + RaidController, + OfferAntiDosInfo, + OfferBackupInfo, + OfferBandwidthInfo, + OfferFailoverBlockInfo, + OfferFailoverIpInfo, + OfferLicenseInfo, + OfferRPNInfo, + OfferSANInfo, + OfferServerInfo, + OfferServiceLevelInfo, + OfferStorageInfo, + Offer, + OS, + RpnSan, + RpnGroup, + NetworkInterface, + ServerLocation, + ServerOption, + ServiceLevel, + Server, + RpnV2GroupSubnet, + RpnV2Group, + Service, + FailoverBlock, + FailoverIP, + BMCAccess, + Backup, + CanOrderResponse, + CreateFailoverIPsResponse, + GetIPv6BlockQuotasResponseQuota, + GetIPv6BlockQuotasResponse, + GetRemainingQuotaResponse, + GetRpnStatusResponse, + IPv6Block, + Invoice, + ListFailoverIPsResponse, + ListIPv6BlockSubnetsAvailableResponseSubnet, + ListIPv6BlockSubnetsAvailableResponse, + InvoiceSummary, + ListInvoicesResponse, + RpnSanIpRpnV2Group, + RpnSanIpServer, + RpnSanIp, + ListIpsResponse, + ListOSResponse, + ListOffersResponse, + RefundSummary, + ListRefundsResponse, + RpnSanServer, + ListRpnCapableSanServersResponse, + ListRpnCapableServersResponse, + RpnGroupMember, + ListRpnGroupMembersResponse, + ListRpnGroupsResponse, + ListRpnInvitesResponse, + RpnSanSummary, + ListRpnSansResponse, + RpnServerCapability, + ListRpnServerCapabilitiesResponse, + ListRpnV2CapableResourcesResponse, + RpnV2Member, + Log, + ListRpnV2GroupLogsResponse, + ListRpnV2GroupsResponse, + ListRpnV2MembersResponse, + ServerDisk, + ListServerDisksResponse, + ServerEvent, + ListServerEventsResponse, + ServerSummary, + ListServersResponse, + ListServicesResponse, + ListSubscribableServerOptionsResponse, + RaidArray, + Raid, + Refund, + Rescue, + Partition, + ServerDefaultPartitioning, + ServerInstall, + SubscribeStorageOptionsResponse, + AttachFailoverIPToMacAddressRequest, + AttachFailoverIPsRequest, + CreateFailoverIPsRequest, + CreateServerRequest, + DetachFailoverIPsRequest, + IPv6BlockApiCreateIPv6BlockRequest, + IPv6BlockApiCreateIPv6BlockSubnetRequest, + IPv6BlockApiUpdateIPv6BlockRequest, + InstallPartition, + InstallServerRequest, + RpnSanApiAddIpRequest, + RpnSanApiCreateRpnSanRequest, + RpnSanApiRemoveIpRequest, + RpnV1ApiAddRpnGroupMembersRequest, + RpnV1ApiCreateRpnGroupRequest, + RpnV1ApiDeleteRpnGroupMembersRequest, + RpnV1ApiLeaveRpnGroupRequest, + RpnV1ApiRpnGroupInviteRequest, + RpnV1ApiUpdateRpnGroupNameRequest, + RpnV2ApiAddRpnV2MembersRequest, + RpnV2ApiCreateRpnV2GroupRequest, + RpnV2ApiDeleteRpnV2MembersRequest, + RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + RpnV2ApiUpdateRpnV2GroupNameRequest, + RpnV2ApiUpdateRpnV2VlanForMembersRequest, + StartBMCAccessRequest, + StartRescueRequest, + SubscribeServerOptionRequest, + SubscribeStorageOptionsRequest, + UpdatableRaidArray, + UpdateRaidRequest, + UpdateReverseRequest, + UpdateServerBackupRequest, + UpdateServerRequest, + UpdateServerTagsRequest, +) + + +def unmarshal_IP(data: Any) -> IP: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'IP' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("ip_id", None) + if field is not None: + args["ip_id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("reverse", None) + if field is not None: + args["reverse"] = field + + field = data.get("version", None) + if field is not None: + args["version"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("netmask", None) + if field is not None: + args["netmask"] = field + + field = data.get("semantic", None) + if field is not None: + args["semantic"] = field + + field = data.get("gateway", None) + if field is not None: + args["gateway"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + return IP(**args) + + +def unmarshal_CPU(data: Any) -> CPU: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'CPU' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("core_count", None) + if field is not None: + args["core_count"] = field + + field = data.get("thread_count", None) + if field is not None: + args["thread_count"] = field + + field = data.get("frequency", None) + if field is not None: + args["frequency"] = field + + return CPU(**args) + + +def unmarshal_Disk(data: Any) -> Disk: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Disk' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + return Disk(**args) + + +def unmarshal_Memory(data: Any) -> Memory: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Memory' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("frequency", None) + if field is not None: + args["frequency"] = field + + field = data.get("is_ecc", None) + if field is not None: + args["is_ecc"] = field + + return Memory(**args) + + +def unmarshal_PersistentMemory(data: Any) -> PersistentMemory: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'PersistentMemory' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("frequency", None) + if field is not None: + args["frequency"] = field + + field = data.get("model", None) + if field is not None: + args["model"] = field + + return PersistentMemory(**args) + + +def unmarshal_RaidController(data: Any) -> RaidController: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RaidController' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("model", None) + if field is not None: + args["model"] = field + + field = data.get("raid_level", None) + if field is not None: + args["raid_level"] = field + + return RaidController(**args) + + +def unmarshal_OfferAntiDosInfo(data: Any) -> OfferAntiDosInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferAntiDosInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + return OfferAntiDosInfo(**args) + + +def unmarshal_OfferBackupInfo(data: Any) -> OfferBackupInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferBackupInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("size", None) + if field is not None: + args["size"] = field + + return OfferBackupInfo(**args) + + +def unmarshal_OfferBandwidthInfo(data: Any) -> OfferBandwidthInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferBandwidthInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return OfferBandwidthInfo(**args) + + +def unmarshal_OfferFailoverBlockInfo(data: Any) -> OfferFailoverBlockInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferFailoverBlockInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("onetime_fees", None) + if field is not None: + args["onetime_fees"] = unmarshal_Offer(field) + + return OfferFailoverBlockInfo(**args) + + +def unmarshal_OfferFailoverIpInfo(data: Any) -> OfferFailoverIpInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferFailoverIpInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("onetime_fees", None) + if field is not None: + args["onetime_fees"] = unmarshal_Offer(field) + + return OfferFailoverIpInfo(**args) + + +def unmarshal_OfferLicenseInfo(data: Any) -> OfferLicenseInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferLicenseInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("bound_to_ip", None) + if field is not None: + args["bound_to_ip"] = field + + return OfferLicenseInfo(**args) + + +def unmarshal_OfferRPNInfo(data: Any) -> OfferRPNInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferRPNInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return OfferRPNInfo(**args) + + +def unmarshal_OfferSANInfo(data: Any) -> OfferSANInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferSANInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("size", None) + if field is not None: + args["size"] = field + + field = data.get("ha", None) + if field is not None: + args["ha"] = field + + field = data.get("device_type", None) + if field is not None: + args["device_type"] = field + + return OfferSANInfo(**args) + + +def unmarshal_OfferServerInfo(data: Any) -> OfferServerInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferServerInfo' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("bandwidth", None) + if field is not None: + args["bandwidth"] = field + + field = data.get("stock", None) + if field is not None: + args["stock"] = field + + field = data.get("commercial_range", None) + if field is not None: + args["commercial_range"] = field + + field = data.get("disks", None) + if field is not None: + args["disks"] = ( + [unmarshal_Disk(v) for v in field] if field is not None else None + ) + + field = data.get("cpus", None) + if field is not None: + args["cpus"] = [unmarshal_CPU(v) for v in field] if field is not None else None + + field = data.get("memories", None) + if field is not None: + args["memories"] = ( + [unmarshal_Memory(v) for v in field] if field is not None else None + ) + + field = data.get("persistent_memories", None) + if field is not None: + args["persistent_memories"] = ( + [unmarshal_PersistentMemory(v) for v in field] + if field is not None + else None + ) + + field = data.get("raid_controllers", None) + if field is not None: + args["raid_controllers"] = ( + [unmarshal_RaidController(v) for v in field] if field is not None else None + ) + + field = data.get("available_options", None) + if field is not None: + args["available_options"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + field = data.get("connectivity", None) + if field is not None: + args["connectivity"] = field + + field = data.get("stock_by_datacenter", None) + if field is not None: + args["stock_by_datacenter"] = ( + {key: OfferServerInfoStock(value) for key, value in field.items()} + if field is not None + else None + ) + + field = data.get("rpn_version", None) + if field is not None: + args["rpn_version"] = field + + field = data.get("onetime_fees", None) + if field is not None: + args["onetime_fees"] = unmarshal_Offer(field) + + return OfferServerInfo(**args) + + +def unmarshal_OfferServiceLevelInfo(data: Any) -> OfferServiceLevelInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferServiceLevelInfo' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("support_ticket", None) + if field is not None: + args["support_ticket"] = field + + field = data.get("support_phone", None) + if field is not None: + args["support_phone"] = field + + field = data.get("sales_support", None) + if field is not None: + args["sales_support"] = field + + field = data.get("git", None) + if field is not None: + args["git"] = field + + field = data.get("sla", None) + if field is not None: + args["sla"] = field + + field = data.get("priority_support", None) + if field is not None: + args["priority_support"] = field + + field = data.get("high_rpn_bandwidth", None) + if field is not None: + args["high_rpn_bandwidth"] = field + + field = data.get("customization", None) + if field is not None: + args["customization"] = field + + field = data.get("antidos", None) + if field is not None: + args["antidos"] = field + + field = data.get("extra_failover_quota", None) + if field is not None: + args["extra_failover_quota"] = field + + field = data.get("available_options", None) + if field is not None: + args["available_options"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + return OfferServiceLevelInfo(**args) + + +def unmarshal_OfferStorageInfo(data: Any) -> OfferStorageInfo: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OfferStorageInfo' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("max_quota", None) + if field is not None: + args["max_quota"] = field + + field = data.get("size", None) + if field is not None: + args["size"] = field + + return OfferStorageInfo(**args) + + +def unmarshal_Offer(data: Any) -> Offer: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Offer' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("catalog", None) + if field is not None: + args["catalog"] = field + + field = data.get("payment_frequency", None) + if field is not None: + args["payment_frequency"] = field + + field = data.get("pricing", None) + if field is not None: + args["pricing"] = unmarshal_Money(field) + + field = data.get("server_info", None) + if field is not None: + args["server_info"] = unmarshal_OfferServerInfo(field) + + field = data.get("service_level_info", None) + if field is not None: + args["service_level_info"] = unmarshal_OfferServiceLevelInfo(field) + + field = data.get("rpn_info", None) + if field is not None: + args["rpn_info"] = unmarshal_OfferRPNInfo(field) + + field = data.get("san_info", None) + if field is not None: + args["san_info"] = unmarshal_OfferSANInfo(field) + + field = data.get("antidos_info", None) + if field is not None: + args["antidos_info"] = unmarshal_OfferAntiDosInfo(field) + + field = data.get("backup_info", None) + if field is not None: + args["backup_info"] = unmarshal_OfferBackupInfo(field) + + field = data.get("usb_storage_info", None) + if field is not None: + args["usb_storage_info"] = unmarshal_OfferStorageInfo(field) + + field = data.get("storage_info", None) + if field is not None: + args["storage_info"] = unmarshal_OfferStorageInfo(field) + + field = data.get("license_info", None) + if field is not None: + args["license_info"] = unmarshal_OfferLicenseInfo(field) + + field = data.get("failover_ip_info", None) + if field is not None: + args["failover_ip_info"] = unmarshal_OfferFailoverIpInfo(field) + + field = data.get("failover_block_info", None) + if field is not None: + args["failover_block_info"] = unmarshal_OfferFailoverBlockInfo(field) + + field = data.get("bandwidth_info", None) + if field is not None: + args["bandwidth_info"] = unmarshal_OfferBandwidthInfo(field) + + return Offer(**args) + + +def unmarshal_OS(data: Any) -> OS: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'OS' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("version", None) + if field is not None: + args["version"] = field + + field = data.get("arch", None) + if field is not None: + args["arch"] = field + + field = data.get("allow_custom_partitioning", None) + if field is not None: + args["allow_custom_partitioning"] = field + + field = data.get("allow_ssh_keys", None) + if field is not None: + args["allow_ssh_keys"] = field + + field = data.get("requires_user", None) + if field is not None: + args["requires_user"] = field + + field = data.get("requires_admin_password", None) + if field is not None: + args["requires_admin_password"] = field + + field = data.get("requires_panel_password", None) + if field is not None: + args["requires_panel_password"] = field + + field = data.get("allowed_filesystems", None) + if field is not None: + args["allowed_filesystems"] = ( + [PartitionFileSystem(v) for v in field] if field is not None else None + ) + + field = data.get("requires_license", None) + if field is not None: + args["requires_license"] = field + + field = data.get("license_offers", None) + if field is not None: + args["license_offers"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + field = data.get("display_name", None) + if field is not None: + args["display_name"] = field + + field = data.get("password_regex", None) + if field is not None: + args["password_regex"] = field + + field = data.get("hostname_max_length", None) + if field is not None: + args["hostname_max_length"] = field + + field = data.get("max_partitions", None) + if field is not None: + args["max_partitions"] = field + + field = data.get("panel_password_regex", None) + if field is not None: + args["panel_password_regex"] = field + + field = data.get("requires_valid_hostname", None) + if field is not None: + args["requires_valid_hostname"] = field + + field = data.get("hostname_regex", None) + if field is not None: + args["hostname_regex"] = field + + field = data.get("released_at", None) + if field is not None: + args["released_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return OS(**args) + + +def unmarshal_RpnSan(data: Any) -> RpnSan: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSan' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("server_hostname", None) + if field is not None: + args["server_hostname"] = field + + field = data.get("iqn_suffix", None) + if field is not None: + args["iqn_suffix"] = field + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer_name", None) + if field is not None: + args["offer_name"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("storage_size", None) + if field is not None: + args["storage_size"] = field + + field = data.get("iqn", None) + if field is not None: + args["iqn"] = field + + field = data.get("rpnv1_compatible", None) + if field is not None: + args["rpnv1_compatible"] = field + + field = data.get("rpnv1_implicit", None) + if field is not None: + args["rpnv1_implicit"] = field + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("delivered_at", None) + if field is not None: + args["delivered_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("terminated_at", None) + if field is not None: + args["terminated_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return RpnSan(**args) + + +def unmarshal_RpnGroup(data: Any) -> RpnGroup: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnGroup' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("active", None) + if field is not None: + args["active"] = field + + field = data.get("owner", None) + if field is not None: + args["owner"] = field + + field = data.get("members_count", None) + if field is not None: + args["members_count"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return RpnGroup(**args) + + +def unmarshal_NetworkInterface(data: Any) -> NetworkInterface: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'NetworkInterface' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("card_id", None) + if field is not None: + args["card_id"] = field + + field = data.get("device_id", None) + if field is not None: + args["device_id"] = field + + field = data.get("mac", None) + if field is not None: + args["mac"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("ips", None) + if field is not None: + args["ips"] = [unmarshal_IP(v) for v in field] if field is not None else None + + return NetworkInterface(**args) + + +def unmarshal_ServerLocation(data: Any) -> ServerLocation: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerLocation' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("rack", None) + if field is not None: + args["rack"] = field + + field = data.get("room", None) + if field is not None: + args["room"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + return ServerLocation(**args) + + +def unmarshal_ServerOption(data: Any) -> ServerOption: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerOption' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("options", None) + if field is not None: + args["options"] = ( + [unmarshal_ServerOption(v) for v in field] if field is not None else None + ) + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("expired_at", None) + if field is not None: + args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return ServerOption(**args) + + +def unmarshal_ServiceLevel(data: Any) -> ServiceLevel: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServiceLevel' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("level", None) + if field is not None: + args["level"] = field + + return ServiceLevel(**args) + + +def unmarshal_Server(data: Any) -> Server: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Server' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("rebooted_at", None) + if field is not None: + args["rebooted_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("abuse_contact", None) + if field is not None: + args["abuse_contact"] = field + + field = data.get("interfaces", None) + if field is not None: + args["interfaces"] = ( + [unmarshal_NetworkInterface(v) for v in field] + if field is not None + else None + ) + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + field = data.get("options", None) + if field is not None: + args["options"] = ( + [unmarshal_ServerOption(v) for v in field] if field is not None else None + ) + + field = data.get("has_bmc", None) + if field is not None: + args["has_bmc"] = field + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + + field = data.get("is_outsourced", None) + if field is not None: + args["is_outsourced"] = field + + field = data.get("ipv6_slaac", None) + if field is not None: + args["ipv6_slaac"] = field + + field = data.get("qinq", None) + if field is not None: + args["qinq"] = field + + field = data.get("is_rpnv2_member", None) + if field is not None: + args["is_rpnv2_member"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("expired_at", None) + if field is not None: + args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("location", None) + if field is not None: + args["location"] = unmarshal_ServerLocation(field) + + field = data.get("os", None) + if field is not None: + args["os"] = unmarshal_OS(field) + + field = data.get("level", None) + if field is not None: + args["level"] = unmarshal_ServiceLevel(field) + + field = data.get("rescue_os", None) + if field is not None: + args["rescue_os"] = unmarshal_OS(field) + + return Server(**args) + + +def unmarshal_RpnV2GroupSubnet(data: Any) -> RpnV2GroupSubnet: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnV2GroupSubnet' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + return RpnV2GroupSubnet(**args) + + +def unmarshal_RpnV2Group(data: Any) -> RpnV2Group: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnV2Group' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("compatible_rpnv1", None) + if field is not None: + args["compatible_rpnv1"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("owner", None) + if field is not None: + args["owner"] = field + + field = data.get("members_count", None) + if field is not None: + args["members_count"] = field + + field = data.get("gateway", None) + if field is not None: + args["gateway"] = field + + field = data.get("subnet", None) + if field is not None: + args["subnet"] = unmarshal_RpnV2GroupSubnet(field) + + field = data.get("rpnv1_group", None) + if field is not None: + args["rpnv1_group"] = unmarshal_RpnGroup(field) + + return RpnV2Group(**args) + + +def unmarshal_Service(data: Any) -> Service: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Service' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("provisioning_status", None) + if field is not None: + args["provisioning_status"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("resource_id", None) + if field is not None: + args["resource_id"] = field + + field = data.get("offer", None) + if field is not None: + args["offer"] = unmarshal_Offer(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("delivered_at", None) + if field is not None: + args["delivered_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("terminated_at", None) + if field is not None: + args["terminated_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return Service(**args) + + +def unmarshal_FailoverBlock(data: Any) -> FailoverBlock: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'FailoverBlock' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("nameservers", None) + if field is not None: + args["nameservers"] = field + + field = data.get("ip_version", None) + if field is not None: + args["ip_version"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("netmask", None) + if field is not None: + args["netmask"] = field + + field = data.get("gateway_ip", None) + if field is not None: + args["gateway_ip"] = field + + return FailoverBlock(**args) + + +def unmarshal_FailoverIP(data: Any) -> FailoverIP: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'FailoverIP' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("reverse", None) + if field is not None: + args["reverse"] = field + + field = data.get("ip_version", None) + if field is not None: + args["ip_version"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("netmask", None) + if field is not None: + args["netmask"] = field + + field = data.get("gateway_ip", None) + if field is not None: + args["gateway_ip"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("mac", None) + if field is not None: + args["mac"] = field + + field = data.get("server_id", None) + if field is not None: + args["server_id"] = field + + field = data.get("block", None) + if field is not None: + args["block"] = unmarshal_FailoverBlock(field) + + field = data.get("server_zone", None) + if field is not None: + args["server_zone"] = field + + return FailoverIP(**args) + + +def unmarshal_BMCAccess(data: Any) -> BMCAccess: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'BMCAccess' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("url", None) + if field is not None: + args["url"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("password", None) + if field is not None: + args["password"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return BMCAccess(**args) + + +def unmarshal_Backup(data: Any) -> Backup: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Backup' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("server", None) + if field is not None: + args["server"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("acl_enabled", None) + if field is not None: + args["acl_enabled"] = field + + field = data.get("autologin", None) + if field is not None: + args["autologin"] = field + + field = data.get("quota_space", None) + if field is not None: + args["quota_space"] = field + + field = data.get("quota_space_used", None) + if field is not None: + args["quota_space_used"] = field + + field = data.get("quota_files", None) + if field is not None: + args["quota_files"] = field + + field = data.get("quota_files_used", None) + if field is not None: + args["quota_files_used"] = field + + return Backup(**args) + + +def unmarshal_CanOrderResponse(data: Any) -> CanOrderResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'CanOrderResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("can_order", None) + if field is not None: + args["can_order"] = field + + field = data.get("quota_ok", None) + if field is not None: + args["quota_ok"] = field + + field = data.get("phone_confirmed", None) + if field is not None: + args["phone_confirmed"] = field + + field = data.get("email_confirmed", None) + if field is not None: + args["email_confirmed"] = field + + field = data.get("user_confirmed", None) + if field is not None: + args["user_confirmed"] = field + + field = data.get("payment_mode", None) + if field is not None: + args["payment_mode"] = field + + field = data.get("billing_ok", None) + if field is not None: + args["billing_ok"] = field + + field = data.get("message", None) + if field is not None: + args["message"] = field + + return CanOrderResponse(**args) + + +def unmarshal_CreateFailoverIPsResponse(data: Any) -> CreateFailoverIPsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'CreateFailoverIPsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("services", None) + if field is not None: + args["services"] = ( + [unmarshal_Service(v) for v in field] if field is not None else None + ) + + return CreateFailoverIPsResponse(**args) + + +def unmarshal_GetIPv6BlockQuotasResponseQuota( + data: Any, +) -> GetIPv6BlockQuotasResponseQuota: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetIPv6BlockQuotasResponseQuota' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("quota", None) + if field is not None: + args["quota"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + return GetIPv6BlockQuotasResponseQuota(**args) + + +def unmarshal_GetIPv6BlockQuotasResponse(data: Any) -> GetIPv6BlockQuotasResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetIPv6BlockQuotasResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("quotas", None) + if field is not None: + args["quotas"] = ( + [unmarshal_GetIPv6BlockQuotasResponseQuota(v) for v in field] + if field is not None + else None + ) + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + return GetIPv6BlockQuotasResponse(**args) + + +def unmarshal_GetRemainingQuotaResponse(data: Any) -> GetRemainingQuotaResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetRemainingQuotaResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("failover_ip_quota", None) + if field is not None: + args["failover_ip_quota"] = field + + field = data.get("failover_ip_remaining_quota", None) + if field is not None: + args["failover_ip_remaining_quota"] = field + + field = data.get("failover_block_quota", None) + if field is not None: + args["failover_block_quota"] = field + + field = data.get("failover_block_remaining_quota", None) + if field is not None: + args["failover_block_remaining_quota"] = field + + return GetRemainingQuotaResponse(**args) + + +def unmarshal_GetRpnStatusResponse(data: Any) -> GetRpnStatusResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GetRpnStatusResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("operations_left", None) + if field is not None: + args["operations_left"] = field + + return GetRpnStatusResponse(**args) + + +def unmarshal_IPv6Block(data: Any) -> IPv6Block: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'IPv6Block' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("duid", None) + if field is not None: + args["duid"] = field + + field = data.get("nameservers", None) + if field is not None: + args["nameservers"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + field = data.get("subnets", None) + if field is not None: + args["subnets"] = ( + [unmarshal_IPv6Block(v) for v in field] if field is not None else None + ) + + field = data.get("delegation_status", None) + if field is not None: + args["delegation_status"] = field + + return IPv6Block(**args) + + +def unmarshal_Invoice(data: Any) -> Invoice: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Invoice' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("payment_method", None) + if field is not None: + args["payment_method"] = field + + field = data.get("content", None) + if field is not None: + args["content"] = field + + field = data.get("transaction_id", None) + if field is not None: + args["transaction_id"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("paid_at", None) + if field is not None: + args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return Invoice(**args) + + +def unmarshal_ListFailoverIPsResponse(data: Any) -> ListFailoverIPsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListFailoverIPsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("failover_ips", None) + if field is not None: + args["failover_ips"] = ( + [unmarshal_FailoverIP(v) for v in field] if field is not None else None + ) + + return ListFailoverIPsResponse(**args) + + +def unmarshal_ListIPv6BlockSubnetsAvailableResponseSubnet( + data: Any, +) -> ListIPv6BlockSubnetsAvailableResponseSubnet: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListIPv6BlockSubnetsAvailableResponseSubnet' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("address", None) + if field is not None: + args["address"] = field + + field = data.get("cidr", None) + if field is not None: + args["cidr"] = field + + return ListIPv6BlockSubnetsAvailableResponseSubnet(**args) + + +def unmarshal_ListIPv6BlockSubnetsAvailableResponse( + data: Any, +) -> ListIPv6BlockSubnetsAvailableResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListIPv6BlockSubnetsAvailableResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("subnet_availables", None) + if field is not None: + args["subnet_availables"] = ( + [unmarshal_ListIPv6BlockSubnetsAvailableResponseSubnet(v) for v in field] + if field is not None + else None + ) + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + return ListIPv6BlockSubnetsAvailableResponse(**args) + + +def unmarshal_InvoiceSummary(data: Any) -> InvoiceSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'InvoiceSummary' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("payment_method", None) + if field is not None: + args["payment_method"] = field + + field = data.get("transaction_id", None) + if field is not None: + args["transaction_id"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("paid_at", None) + if field is not None: + args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return InvoiceSummary(**args) + + +def unmarshal_ListInvoicesResponse(data: Any) -> ListInvoicesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListInvoicesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("invoices", None) + if field is not None: + args["invoices"] = ( + [unmarshal_InvoiceSummary(v) for v in field] if field is not None else None + ) + + return ListInvoicesResponse(**args) + + +def unmarshal_RpnSanIpRpnV2Group(data: Any) -> RpnSanIpRpnV2Group: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanIpRpnV2Group' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + return RpnSanIpRpnV2Group(**args) + + +def unmarshal_RpnSanIpServer(data: Any) -> RpnSanIpServer: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanIpServer' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + return RpnSanIpServer(**args) + + +def unmarshal_RpnSanIp(data: Any) -> RpnSanIp: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanIp' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("server", None) + if field is not None: + args["server"] = unmarshal_RpnSanIpServer(field) + + field = data.get("rpnv2_group", None) + if field is not None: + args["rpnv2_group"] = unmarshal_RpnSanIpRpnV2Group(field) + + field = data.get("ip", None) + if field is not None: + args["ip"] = unmarshal_IP(field) + + return RpnSanIp(**args) + + +def unmarshal_ListIpsResponse(data: Any) -> ListIpsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListIpsResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("ips", None) + if field is not None: + args["ips"] = ( + [unmarshal_RpnSanIp(v) for v in field] if field is not None else None + ) + + return ListIpsResponse(**args) + + +def unmarshal_ListOSResponse(data: Any) -> ListOSResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListOSResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("os", None) + if field is not None: + args["os"] = [unmarshal_OS(v) for v in field] if field is not None else None + + return ListOSResponse(**args) + + +def unmarshal_ListOffersResponse(data: Any) -> ListOffersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListOffersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("offers", None) + if field is not None: + args["offers"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + return ListOffersResponse(**args) + + +def unmarshal_RefundSummary(data: Any) -> RefundSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RefundSummary' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("method", None) + if field is not None: + args["method"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("refunded_at", None) + if field is not None: + args["refunded_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return RefundSummary(**args) + + +def unmarshal_ListRefundsResponse(data: Any) -> ListRefundsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRefundsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("refunds", None) + if field is not None: + args["refunds"] = ( + [unmarshal_RefundSummary(v) for v in field] if field is not None else None + ) + + return ListRefundsResponse(**args) + + +def unmarshal_RpnSanServer(data: Any) -> RpnSanServer: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanServer' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("sans", None) + if field is not None: + args["sans"] = ( + [unmarshal_RpnSan(v) for v in field] if field is not None else None + ) + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + return RpnSanServer(**args) + + +def unmarshal_ListRpnCapableSanServersResponse( + data: Any, +) -> ListRpnCapableSanServersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnCapableSanServersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("san_servers", None) + if field is not None: + args["san_servers"] = ( + [unmarshal_RpnSanServer(v) for v in field] if field is not None else None + ) + + return ListRpnCapableSanServersResponse(**args) + + +def unmarshal_ListRpnCapableServersResponse(data: Any) -> ListRpnCapableServersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnCapableServersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_Server(v) for v in field] if field is not None else None + ) + + return ListRpnCapableServersResponse(**args) + + +def unmarshal_RpnGroupMember(data: Any) -> RpnGroupMember: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnGroupMember' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("group_id", None) + if field is not None: + args["group_id"] = field + + field = data.get("group_name", None) + if field is not None: + args["group_name"] = field + + field = data.get("group_owner", None) + if field is not None: + args["group_owner"] = field + + field = data.get("owner", None) + if field is not None: + args["owner"] = field + + field = data.get("san_server", None) + if field is not None: + args["san_server"] = unmarshal_RpnSanServer(field) + + field = data.get("server", None) + if field is not None: + args["server"] = unmarshal_Server(field) + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return RpnGroupMember(**args) + + +def unmarshal_ListRpnGroupMembersResponse(data: Any) -> ListRpnGroupMembersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnGroupMembersResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("members", None) + if field is not None: + args["members"] = ( + [unmarshal_RpnGroupMember(v) for v in field] if field is not None else None + ) + + return ListRpnGroupMembersResponse(**args) + + +def unmarshal_ListRpnGroupsResponse(data: Any) -> ListRpnGroupsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnGroupsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("rpn_groups", None) + if field is not None: + args["rpn_groups"] = ( + [unmarshal_RpnGroup(v) for v in field] if field is not None else None + ) + + return ListRpnGroupsResponse(**args) + + +def unmarshal_ListRpnInvitesResponse(data: Any) -> ListRpnInvitesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnInvitesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("members", None) + if field is not None: + args["members"] = ( + [unmarshal_RpnGroupMember(v) for v in field] if field is not None else None + ) + + return ListRpnInvitesResponse(**args) + + +def unmarshal_RpnSanSummary(data: Any) -> RpnSanSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnSanSummary' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("server_hostname", None) + if field is not None: + args["server_hostname"] = field + + field = data.get("iqn_suffix", None) + if field is not None: + args["iqn_suffix"] = field + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer_name", None) + if field is not None: + args["offer_name"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("storage_size", None) + if field is not None: + args["storage_size"] = field + + field = data.get("rpnv1_compatible", None) + if field is not None: + args["rpnv1_compatible"] = field + + field = data.get("rpnv1_implicit", None) + if field is not None: + args["rpnv1_implicit"] = field + + field = data.get("delivered_at", None) + if field is not None: + args["delivered_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("terminated_at", None) + if field is not None: + args["terminated_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + field = data.get("expires_at", None) + if field is not None: + args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return RpnSanSummary(**args) + + +def unmarshal_ListRpnSansResponse(data: Any) -> 
ListRpnSansResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnSansResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("rpn_sans", None) + if field is not None: + args["rpn_sans"] = ( + [unmarshal_RpnSanSummary(v) for v in field] if field is not None else None + ) + + return ListRpnSansResponse(**args) + + +def unmarshal_RpnServerCapability(data: Any) -> RpnServerCapability: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnServerCapability' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + field = data.get("compatible_qinq", None) + if field is not None: + args["compatible_qinq"] = field + + field = data.get("can_join_qinq_group", None) + if field is not None: + args["can_join_qinq_group"] = field + + field = data.get("rpnv1_group_count", None) + if field is not None: + args["rpnv1_group_count"] = field + + field = data.get("rpnv2_group_count", None) + if field is not None: + args["rpnv2_group_count"] = field + + field = data.get("can_join_rpnv2_group", None) + if field is not None: + args["can_join_rpnv2_group"] = field + + field = data.get("ip_address", None) + if field is not None: + args["ip_address"] = field + + field = data.get("rpn_version", None) + if field is not None: + args["rpn_version"] = field + + return RpnServerCapability(**args) + + +def unmarshal_ListRpnServerCapabilitiesResponse( + data: Any, +) -> ListRpnServerCapabilitiesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnServerCapabilitiesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_RpnServerCapability(v) for v in field] + if field is not None + else None + ) + + return ListRpnServerCapabilitiesResponse(**args) + + +def unmarshal_ListRpnV2CapableResourcesResponse( + data: Any, +) -> ListRpnV2CapableResourcesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2CapableResourcesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_Server(v) for v in field] if field is not None else None + ) + + return ListRpnV2CapableResourcesResponse(**args) + + +def unmarshal_RpnV2Member(data: Any) -> RpnV2Member: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RpnV2Member' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("vlan", None) + if field is not None: + args["vlan"] = field + + field = data.get("server", None) + if field is not None: + args["server"] = unmarshal_Server(field) + + field = data.get("rpnv1_group", None) + if field is not None: + args["rpnv1_group"] = unmarshal_RpnGroup(field) + + field = data.get("speed", None) + if field is not None: + args["speed"] = field + + return RpnV2Member(**args) + + +def unmarshal_Log(data: Any) -> Log: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Log' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("action", None) + if field is not None: + args["action"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("group", None) + if field is not None: + args["group"] = unmarshal_RpnV2Group(field) + + field = data.get("member", None) + if field is not None: + args["member"] = unmarshal_RpnV2Member(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("finished_at", None) + if field is not None: + args["finished_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return Log(**args) + + +def unmarshal_ListRpnV2GroupLogsResponse(data: Any) -> ListRpnV2GroupLogsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2GroupLogsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("logs", None) + if field is not None: + args["logs"] = [unmarshal_Log(v) for v in field] if field is not None else None + + return ListRpnV2GroupLogsResponse(**args) + + +def unmarshal_ListRpnV2GroupsResponse(data: Any) -> ListRpnV2GroupsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2GroupsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("rpn_groups", None) + if field is not None: + args["rpn_groups"] = ( + [unmarshal_RpnV2Group(v) for v in field] if field is not None else None + ) + + return ListRpnV2GroupsResponse(**args) + + +def unmarshal_ListRpnV2MembersResponse(data: Any) -> ListRpnV2MembersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListRpnV2MembersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("members", None) + if field is not None: + args["members"] = ( + [unmarshal_RpnV2Member(v) for v in field] if field is not None else None + ) + + return ListRpnV2MembersResponse(**args) + + +def unmarshal_ServerDisk(data: Any) -> ServerDisk: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerDisk' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("connector", None) + if field is not None: + args["connector"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("is_addon", None) + if field is not None: + args["is_addon"] = field + + return ServerDisk(**args) + + +def unmarshal_ListServerDisksResponse(data: Any) -> ListServerDisksResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServerDisksResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("disks", None) + if field is not None: + args["disks"] = ( + [unmarshal_ServerDisk(v) for v in field] if field is not None else None + ) + + return ListServerDisksResponse(**args) + + +def unmarshal_ServerEvent(data: Any) -> ServerEvent: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerEvent' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("event_id", None) + if field is not None: + args["event_id"] = field + + field = data.get("description", None) + if field is not None: + args["description"] = field + + field = data.get("date", None) + if field is not None: + args["date"] = parser.isoparse(field) if isinstance(field, str) else field + + return ServerEvent(**args) + + +def unmarshal_ListServerEventsResponse(data: Any) -> ListServerEventsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServerEventsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("events", None) + if field is not None: + args["events"] = ( + [unmarshal_ServerEvent(v) for v in field] if field is not None else None + ) + + return ListServerEventsResponse(**args) + + +def unmarshal_ServerSummary(data: Any) -> ServerSummary: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerSummary' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("datacenter_name", None) + if field is not None: + args["datacenter_name"] = field + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("expired_at", None) + if field is not None: + args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("offer_id", None) + if field is not None: + args["offer_id"] = field + + field = data.get("offer_name", None) + if field is not None: + args["offer_name"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("interfaces", None) + if field is not None: + args["interfaces"] = ( + [unmarshal_NetworkInterface(v) for v in field] + if field is not None + else None + ) + + field = data.get("zone", None) + if field is not None: + args["zone"] = field + + field = data.get("is_outsourced", None) + if field is not None: + args["is_outsourced"] = field + + field = data.get("qinq", None) + if field is not None: + args["qinq"] = field + + field = data.get("os_id", None) + if field is not None: + args["os_id"] = field + + field = data.get("level", None) + if field is not None: + args["level"] = unmarshal_ServiceLevel(field) + + field = data.get("rpn_version", None) + if field is not None: + args["rpn_version"] = field + + return ServerSummary(**args) + + +def unmarshal_ListServersResponse(data: Any) -> ListServersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServersResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("servers", None) + if field is not None: + args["servers"] = ( + [unmarshal_ServerSummary(v) for v in field] if field is not None else None + ) + + return ListServersResponse(**args) + + +def unmarshal_ListServicesResponse(data: Any) -> ListServicesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListServicesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("services", None) + if field is not None: + args["services"] = ( + [unmarshal_Service(v) for v in field] if field is not None else None + ) + + return ListServicesResponse(**args) + + +def unmarshal_ListSubscribableServerOptionsResponse( + data: Any, +) -> ListSubscribableServerOptionsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListSubscribableServerOptionsResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("server_options", None) + if field is not None: + args["server_options"] = ( + [unmarshal_Offer(v) for v in field] if field is not None else None + ) + + return ListSubscribableServerOptionsResponse(**args) + + +def unmarshal_RaidArray(data: Any) -> RaidArray: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'RaidArray' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("raid_level", None) + if field is not None: + args["raid_level"] = field + + field = data.get("disks", None) + if field is not None: + args["disks"] = ( + [unmarshal_ServerDisk(v) for v in field] if field is not None else None + ) + + return RaidArray(**args) + + +def unmarshal_Raid(data: Any) -> Raid: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Raid' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("raid_arrays", None) + if field is not None: + args["raid_arrays"] = ( + [unmarshal_RaidArray(v) for v in field] if field is not None else None + ) + + return Raid(**args) + + +def unmarshal_Refund(data: Any) -> Refund: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Refund' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("method", None) + if field is not None: + args["method"] = field + + field = data.get("content", None) + if field is not None: + args["content"] = field + + field = data.get("total_with_taxes", None) + if field is not None: + args["total_with_taxes"] = unmarshal_Money(field) + + field = data.get("total_without_taxes", None) + if field is not None: + args["total_without_taxes"] = unmarshal_Money(field) + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("refunded_at", None) + if field is not None: + args["refunded_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + + return Refund(**args) + + +def unmarshal_Rescue(data: Any) -> Rescue: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Rescue' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("os_id", None) + if field is not None: + args["os_id"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("password", None) + if field is not None: + args["password"] = field + + field = data.get("protocol", None) + if field is not None: + args["protocol"] = field + + return Rescue(**args) + + +def unmarshal_Partition(data: Any) -> Partition: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Partition' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("file_system", None) + if field is not None: + args["file_system"] = field + + field = data.get("raid_level", None) + if field is not None: + args["raid_level"] = field + + field = data.get("capacity", None) + if field is not None: + args["capacity"] = field + + field = data.get("connectors", None) + if field is not None: + args["connectors"] = field + + field = data.get("mount_point", None) + if field is not None: + args["mount_point"] = field + + return Partition(**args) + + +def unmarshal_ServerDefaultPartitioning(data: Any) -> ServerDefaultPartitioning: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerDefaultPartitioning' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("partitions", None) + if field is not None: + args["partitions"] = ( + [unmarshal_Partition(v) for v in field] if field is not None else None + ) + + return ServerDefaultPartitioning(**args) + + +def unmarshal_ServerInstall(data: Any) -> ServerInstall: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ServerInstall' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("os_id", None) + if field is not None: + args["os_id"] = field + + field = data.get("hostname", None) + if field is not None: + args["hostname"] = field + + field = data.get("partitions", None) + if field is not None: + args["partitions"] = ( + [unmarshal_Partition(v) for v in field] if field is not None else None + ) + + field = data.get("ssh_key_ids", None) + if field is not None: + args["ssh_key_ids"] = field + + field = data.get("status", None) + if field is not None: + args["status"] = field + + field = data.get("user_login", None) + if field is not None: + args["user_login"] = field + + field = data.get("panel_url", None) + if field is not None: + args["panel_url"] = field + + return ServerInstall(**args) + + +def unmarshal_SubscribeStorageOptionsResponse( + data: Any, +) -> SubscribeStorageOptionsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'SubscribeStorageOptionsResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("services", None) + if field is not None: + args["services"] = ( + [unmarshal_Service(v) for v in field] if field is not None else None + ) + + return SubscribeStorageOptionsResponse(**args) + + +def marshal_AttachFailoverIPToMacAddressRequest( + request: AttachFailoverIPToMacAddressRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.type_ is not None: + output["type"] = str(request.type_) + + if request.mac is not None: + output["mac"] = request.mac + + return output + + +def marshal_AttachFailoverIPsRequest( + request: AttachFailoverIPsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.server_id is not None: + output["server_id"] = request.server_id + + if request.fips_ids is not None: + output["fips_ids"] = request.fips_ids + + return output + + +def marshal_CreateFailoverIPsRequest( + request: CreateFailoverIPsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.offer_id is not None: + output["offer_id"] = request.offer_id + + if request.quantity is not None: + output["quantity"] = request.quantity + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_CreateServerRequest( + request: CreateServerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.offer_id is not None: + output["offer_id"] = request.offer_id + + if request.server_option_ids is not None: + output["server_option_ids"] = request.server_option_ids + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.datacenter_name is not None: + output["datacenter_name"] = request.datacenter_name + + return output + + +def marshal_DetachFailoverIPsRequest( + request: DetachFailoverIPsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.fips_ids is not None: + output["fips_ids"] = request.fips_ids + + return output + + +def marshal_IPv6BlockApiCreateIPv6BlockRequest( + request: IPv6BlockApiCreateIPv6BlockRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id + + return output + + +def marshal_IPv6BlockApiCreateIPv6BlockSubnetRequest( + request: IPv6BlockApiCreateIPv6BlockSubnetRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.address is not None: + output["address"] = request.address + + if request.cidr is not None: + output["cidr"] = request.cidr + + return output + + +def marshal_IPv6BlockApiUpdateIPv6BlockRequest( + request: IPv6BlockApiUpdateIPv6BlockRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.nameservers is not None: + output["nameservers"] = request.nameservers + + return output + + +def marshal_InstallPartition( + request: InstallPartition, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.file_system is not None: + output["file_system"] = str(request.file_system) + + if request.raid_level is not None: + output["raid_level"] = str(request.raid_level) + + if request.capacity is not None: + output["capacity"] = request.capacity + + if request.connectors is not None: + 
output["connectors"] = request.connectors + + if request.mount_point is not None: + output["mount_point"] = request.mount_point + + return output + + +def marshal_InstallServerRequest( + request: InstallServerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.os_id is not None: + output["os_id"] = request.os_id + + if request.hostname is not None: + output["hostname"] = request.hostname + + if request.user_login is not None: + output["user_login"] = request.user_login + + if request.user_password is not None: + output["user_password"] = request.user_password + + if request.panel_password is not None: + output["panel_password"] = request.panel_password + + if request.root_password is not None: + output["root_password"] = request.root_password + + if request.partitions is not None: + output["partitions"] = [ + marshal_InstallPartition(item, defaults) for item in request.partitions + ] + + if request.ssh_key_ids is not None: + output["ssh_key_ids"] = request.ssh_key_ids + + if request.license_offer_id is not None: + output["license_offer_id"] = request.license_offer_id + + if request.ip_id is not None: + output["ip_id"] = request.ip_id + + return output + + +def marshal_RpnSanApiAddIpRequest( + request: RpnSanApiAddIpRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.ip_ids is not None: + output["ip_ids"] = request.ip_ids + + return output + + +def marshal_RpnSanApiCreateRpnSanRequest( + request: RpnSanApiCreateRpnSanRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.offer_id is not None: + output["offer_id"] = request.offer_id + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnSanApiRemoveIpRequest( + request: RpnSanApiRemoveIpRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.ip_ids is not None: + output["ip_ids"] = request.ip_ids + + return output + + +def marshal_RpnV1ApiAddRpnGroupMembersRequest( + request: RpnV1ApiAddRpnGroupMembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.server_ids is not None: + output["server_ids"] = request.server_ids + + if request.san_server_ids is not None: + output["san_server_ids"] = request.san_server_ids + + return output + + +def marshal_RpnV1ApiCreateRpnGroupRequest( + request: RpnV1ApiCreateRpnGroupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.server_ids is not None: + output["server_ids"] = request.server_ids + + if request.san_server_ids is not None: + output["san_server_ids"] = request.san_server_ids + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnV1ApiDeleteRpnGroupMembersRequest( + request: RpnV1ApiDeleteRpnGroupMembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids + + return output + + +def marshal_RpnV1ApiLeaveRpnGroupRequest( + request: RpnV1ApiLeaveRpnGroupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids 
+ + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnV1ApiRpnGroupInviteRequest( + request: RpnV1ApiRpnGroupInviteRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.server_ids is not None: + output["server_ids"] = request.server_ids + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RpnV1ApiUpdateRpnGroupNameRequest( + request: RpnV1ApiUpdateRpnGroupNameRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + return output + + +def marshal_RpnV2ApiAddRpnV2MembersRequest( + request: RpnV2ApiAddRpnV2MembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.servers is not None: + output["servers"] = request.servers + + return output + + +def marshal_RpnV2ApiCreateRpnV2GroupRequest( + request: RpnV2ApiCreateRpnV2GroupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.servers is not None: + output["servers"] = request.servers + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.type_ is not None: + output["type"] = str(request.type_) + + return output + + +def marshal_RpnV2ApiDeleteRpnV2MembersRequest( + request: RpnV2ApiDeleteRpnV2MembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids + + return output + + +def marshal_RpnV2ApiEnableRpnV2GroupCompatibilityRequest( + request: RpnV2ApiEnableRpnV2GroupCompatibilityRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.rpnv1_group_id is not None: + output["rpnv1_group_id"] = request.rpnv1_group_id + + return output + + +def marshal_RpnV2ApiUpdateRpnV2GroupNameRequest( + request: RpnV2ApiUpdateRpnV2GroupNameRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + return output + + +def marshal_RpnV2ApiUpdateRpnV2VlanForMembersRequest( + request: RpnV2ApiUpdateRpnV2VlanForMembersRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.member_ids is not None: + output["member_ids"] = request.member_ids + + if request.vlan is not None: + output["vlan"] = request.vlan + + return output + + +def marshal_StartBMCAccessRequest( + request: StartBMCAccessRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.ip is not None: + output["ip"] = request.ip + + return output + + +def marshal_StartRescueRequest( + request: StartRescueRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.os_id is not None: + output["os_id"] = request.os_id + + return output + + +def marshal_SubscribeServerOptionRequest( + request: SubscribeServerOptionRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.option_id is not None: + output["option_id"] = request.option_id + + return output + + +def 
marshal_SubscribeStorageOptionsRequest( + request: SubscribeStorageOptionsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.options_ids is not None: + output["options_ids"] = request.options_ids + + return output + + +def marshal_UpdatableRaidArray( + request: UpdatableRaidArray, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.raid_level is not None: + output["raid_level"] = str(request.raid_level) + + if request.disk_ids is not None: + output["disk_ids"] = request.disk_ids + + return output + + +def marshal_UpdateRaidRequest( + request: UpdateRaidRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.raid_arrays is not None: + output["raid_arrays"] = [ + marshal_UpdatableRaidArray(item, defaults) for item in request.raid_arrays + ] + + return output + + +def marshal_UpdateReverseRequest( + request: UpdateReverseRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.reverse is not None: + output["reverse"] = request.reverse + + return output + + +def marshal_UpdateServerBackupRequest( + request: UpdateServerBackupRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.password is not None: + output["password"] = request.password + + if request.autologin is not None: + output["autologin"] = request.autologin + + if request.acl_enabled is not None: + output["acl_enabled"] = request.acl_enabled + + return output + + +def marshal_UpdateServerRequest( + request: UpdateServerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.hostname is not None: + output["hostname"] = request.hostname + + if request.enable_ipv6 is not None: + output["enable_ipv6"] = request.enable_ipv6 + + return output + + +def marshal_UpdateServerTagsRequest( + request: UpdateServerTagsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.tags is not None: + output["tags"] = request.tags + + return output diff --git a/scaleway/scaleway/dedibox/v1/types.py b/scaleway/scaleway/dedibox/v1/types.py new file mode 100644 index 000000000..0c058935e --- /dev/null +++ b/scaleway/scaleway/dedibox/v1/types.py @@ -0,0 +1,4452 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
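The generated unmarshal_* helpers above all follow the same defensive pattern: check that the payload is a dict, copy only the fields that are actually present into an args mapping, parse RFC 3339 timestamps with dateutil's parser.isoparse, and finally construct the dataclass. The following is a minimal standalone sketch of that pattern; the Event type and unmarshal_Event function are illustrative stand-ins, not part of the generated SDK.

from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Optional

from dateutil import parser


@dataclass
class Event:
    # Illustrative stand-in for a generated type such as ServerEvent.
    event_id: Optional[int] = None
    description: Optional[str] = None
    date: Optional[datetime] = None


def unmarshal_Event(data: Any) -> Event:
    # Same shape as the generated helpers: reject non-dict payloads early.
    if not isinstance(data, dict):
        raise TypeError(
            "Unmarshalling the type 'Event' failed as data isn't a dictionary."
        )

    args: Dict[str, Any] = {}

    # Only copy fields that are present, so missing keys keep their defaults.
    field = data.get("event_id", None)
    if field is not None:
        args["event_id"] = field

    field = data.get("description", None)
    if field is not None:
        args["description"] = field

    # Timestamps arrive as RFC 3339 strings and are parsed on the fly.
    field = data.get("date", None)
    if field is not None:
        args["date"] = parser.isoparse(field) if isinstance(field, str) else field

    return Event(**args)


print(unmarshal_Event({"event_id": 42, "date": "2024-04-02T15:51:33+02:00"}))

The marshal_* request serializers mirror this in the opposite direction: each optional attribute of the request dataclass is copied into a plain dict, enums are stringified, nested objects are marshalled recursively, and project_id falls back to defaults.default_project_id when the profile provides one.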
+from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +from typing import Dict, List, Optional + +from scaleway_core.bridge import ( + Money, + Zone, +) +from scaleway_core.utils import ( + StrEnumMeta, +) + + +class AttachFailoverIPToMacAddressRequestMacType(str, Enum, metaclass=StrEnumMeta): + MAC_TYPE_UNKNOWN = "mac_type_unknown" + VMWARE = "vmware" + KVM = "kvm" + XEN = "xen" + + def __str__(self) -> str: + return str(self.value) + + +class BMCAccessStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + CREATING = "creating" + CREATED = "created" + DELETING = "deleting" + + def __str__(self) -> str: + return str(self.value) + + +class BackupStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_BACKUP_STATUS = "unknown_backup_status" + UNINITIALIZED = "uninitialized" + INACTIVE = "inactive" + READY = "ready" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverBlockVersion(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_VERSION = "unknown_version" + IPV4 = "ipv4" + IPV6 = "ipv6" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverIPInterfaceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + NORMAL = "normal" + IPMI = "ipmi" + VIRTUAL = "virtual" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverIPStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + READY = "ready" + BUSY = "busy" + LOCKED = "locked" + + def __str__(self) -> str: + return str(self.value) + + +class FailoverIPVersion(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_VERSION = "unknown_version" + IPV4 = "ipv4" + IPV6 = "ipv6" + + def __str__(self) -> str: + return str(self.value) + + +class GetRpnStatusResponseStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + BUSY = "busy" + OPERATIONAL = "operational" + + def __str__(self) -> str: + return str(self.value) + + +class IPSemantic(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + PROXAD = "proxad" + EXT = "ext" + PUBLIC = "public" + PRIVATE = "private" + IPMI = "ipmi" + ADM = "adm" + REDIRECT = "redirect" + MIGRATION = "migration" + + def __str__(self) -> str: + return str(self.value) + + +class IPStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + READY = "ready" + BUSY = "busy" + LOCKED = "locked" + + def __str__(self) -> str: + return str(self.value) + + +class IPVersion(str, Enum, metaclass=StrEnumMeta): + IPV4 = "ipv4" + IPV6 = "ipv6" + + def __str__(self) -> str: + return str(self.value) + + +class IPv6BlockDelegationStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + UPDATING = "updating" + DONE = "done" + + def __str__(self) -> str: + return str(self.value) + + +class InvoicePaymentMethod(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_PAYMENT_METHOD = "unknown_payment_method" + CREDIT_CARD = "credit_card" + AMEX = "amex" + PAYPAL = "paypal" + TRANSFER = "transfer" + DIRECT_DEBIT = "direct_debit" + + def __str__(self) -> str: + return str(self.value) + + +class InvoiceStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_INVOICE_STATUS = "unknown_invoice_status" + UNPAID = "unpaid" + PAID = "paid" + ERRORED = "errored" + + def __str__(self) -> str: + return str(self.value) + + +class ListFailoverIPsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + IP_ASC = "ip_asc" + IP_DESC = "ip_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListInvoicesRequestOrderBy(str, Enum, 
metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListOSRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + RELEASED_AT_ASC = "released_at_asc" + RELEASED_AT_DESC = "released_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListOffersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + PRICE_ASC = "price_asc" + PRICE_DESC = "price_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRefundsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnCapableSanServersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnCapableServersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnGroupMembersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnGroupsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnInvitesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnSansRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnServerCapabilitiesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2CapableResourcesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2GroupLogsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2GroupsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2MembersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListRpnV2MembersRequestType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + RPNV1_GROUP = "rpnv1_group" + SERVER = "server" + + def __str__(self) -> str: + return str(self.value) + + +class ListServerDisksRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + 
CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListServerEventsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListServersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListServicesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + + def __str__(self) -> str: + return str(self.value) + + +class LogAction(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_LOG_ACTION = "unknown_log_action" + GROUP_CREATED = "group_created" + GROUP_DELETED = "group_deleted" + MEMBERS_ADDED = "members_added" + MEMBERS_DELETED = "members_deleted" + DESCRIPTION_UPDATED = "description_updated" + RPNV1_MEMBERS_ADDED = "rpnv1_members_added" + RPNV1_MEMBERS_DELETED = "rpnv1_members_deleted" + VLAN_UPDATED = "vlan_updated" + VLAN_UPDATED_ON_ALL_SERVERS = "vlan_updated_on_all_servers" + + def __str__(self) -> str: + return str(self.value) + + +class LogStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_LOG_STATUS = "unknown_log_status" + SUCCESS = "success" + IN_PROGRESS = "in_progress" + ERROR = "error" + + def __str__(self) -> str: + return str(self.value) + + +class MemoryType(str, Enum, metaclass=StrEnumMeta): + DDR2 = "ddr2" + DDR3 = "ddr3" + DDR4 = "ddr4" + + def __str__(self) -> str: + return str(self.value) + + +class NetworkInterfaceInterfaceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + NORMAL = "normal" + IPMI = "ipmi" + VIRTUAL = "virtual" + + def __str__(self) -> str: + return str(self.value) + + +class OSArch(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_ARCH = "unknown_arch" + AMD64 = "amd64" + X86 = "x86" + ARM = "arm" + ARM64 = "arm64" + + def __str__(self) -> str: + return str(self.value) + + +class OSType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + SERVER = "server" + VIRTU = "virtu" + PANEL = "panel" + DESKTOP = "desktop" + CUSTOM = "custom" + RESCUE = "rescue" + + def __str__(self) -> str: + return str(self.value) + + +class OfferAntiDosInfoType(str, Enum, metaclass=StrEnumMeta): + MINIMAL = "minimal" + PREVENTIVE = "preventive" + CURATIVE = "curative" + + def __str__(self) -> str: + return str(self.value) + + +class OfferCatalog(str, Enum, metaclass=StrEnumMeta): + ALL = "all" + DEFAULT = "default" + BETA = "beta" + RESELLER = "reseller" + PREMIUM = "premium" + VOLUME = "volume" + ADMIN = "admin" + INACTIVE = "inactive" + + def __str__(self) -> str: + return str(self.value) + + +class OfferPaymentFrequency(str, Enum, metaclass=StrEnumMeta): + MONTHLY = "monthly" + ONESHOT = "oneshot" + + def __str__(self) -> str: + return str(self.value) + + +class OfferSANInfoType(str, Enum, metaclass=StrEnumMeta): + HDD = "hdd" + SSD = "ssd" + + def __str__(self) -> str: + return str(self.value) + + +class OfferServerInfoStock(str, Enum, metaclass=StrEnumMeta): + EMPTY = "empty" + LOW = "low" + AVAILABLE = "available" + + def __str__(self) -> str: + return str(self.value) + + +class PartitionFileSystem(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + EFI = "efi" + SWAP = "swap" + EXT4 = "ext4" + EXT3 = "ext3" + EXT2 = "ext2" + XFS = "xfs" + NTFS = "ntfs" + FAT32 = "fat32" + UFS = "ufs" + + def __str__(self) -> str: + return str(self.value) + + +class 
PartitionType(str, Enum, metaclass=StrEnumMeta): + PRIMARY = "primary" + EXTENDED = "extended" + LOGICAL = "logical" + + def __str__(self) -> str: + return str(self.value) + + +class RaidArrayRaidLevel(str, Enum, metaclass=StrEnumMeta): + NO_RAID = "no_raid" + RAID0 = "raid0" + RAID1 = "raid1" + RAID5 = "raid5" + RAID6 = "raid6" + RAID10 = "raid10" + + def __str__(self) -> str: + return str(self.value) + + +class RefundMethod(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_REFUND_METHOD = "unknown_refund_method" + CREDIT_CARD = "credit_card" + AMEX = "amex" + PAYPAL = "paypal" + TRANSFER = "transfer" + + def __str__(self) -> str: + return str(self.value) + + +class RefundStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_REFUND_STATUS = "unknown_refund_status" + UNPAID = "unpaid" + PAID = "paid" + ERRORED = "errored" + + def __str__(self) -> str: + return str(self.value) + + +class RescueProtocol(str, Enum, metaclass=StrEnumMeta): + VNC = "vnc" + SSH = "ssh" + + def __str__(self) -> str: + return str(self.value) + + +class RpnGroupMemberStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_RPN_MEMBER_STATUS = "unknown_rpn_member_status" + PENDING_INVITATION = "pending_invitation" + ACTIVE = "active" + CREATING = "creating" + DELETING = "deleting" + DELETED = "deleted" + + def __str__(self) -> str: + return str(self.value) + + +class RpnGroupType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + LOCAL = "local" + SHARED = "shared" + + def __str__(self) -> str: + return str(self.value) + + +class RpnSanIpType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + SERVER_IP = "server_ip" + RPNV2_SUBNET = "rpnv2_subnet" + + def __str__(self) -> str: + return str(self.value) + + +class RpnSanStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + CREATING = "creating" + ACTIVE = "active" + DELETING = "deleting" + MAINTENANCE = "maintenance" + + def __str__(self) -> str: + return str(self.value) + + +class RpnV2GroupStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_GROUP_STATUS = "unknown_group_status" + CREATING = "creating" + ACTIVE = "active" + UPDATING = "updating" + DELETING = "deleting" + + def __str__(self) -> str: + return str(self.value) + + +class RpnV2GroupType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + STANDARD = "standard" + QINQ = "qinq" + + def __str__(self) -> str: + return str(self.value) + + +class RpnV2MemberStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_MEMBER_STATUS = "unknown_member_status" + CREATING = "creating" + ACTIVE = "active" + UPDATING = "updating" + DELETING = "deleting" + + def __str__(self) -> str: + return str(self.value) + + +class ServerDiskType(str, Enum, metaclass=StrEnumMeta): + SATA = "sata" + SSD = "ssd" + SAS = "sas" + SSHD = "sshd" + USB = "usb" + NVME = "nvme" + + def __str__(self) -> str: + return str(self.value) + + +class ServerInstallStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + BOOTING = "booting" + SETTING_UP_RAID = "setting_up_raid" + PARTITIONING = "partitioning" + FORMATTING = "formatting" + INSTALLING = "installing" + CONFIGURING = "configuring" + CONFIGURING_BOOTLOADER = "configuring_bootloader" + REBOOTING = "rebooting" + INSTALLED = "installed" + + def __str__(self) -> str: + return str(self.value) + + +class ServerStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + DELIVERING = "delivering" + INSTALLING = "installing" + READY = "ready" + STOPPED = "stopped" + ERROR = "error" + LOCKED = "locked" + RESCUE = "rescue" + BUSY = "busy" + + def 
__str__(self) -> str: + return str(self.value) + + +class ServiceLevelLevel(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + BASIC = "basic" + BUSINESS = "business" + + def __str__(self) -> str: + return str(self.value) + + +class ServiceProvisioningStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN = "unknown" + DELIVERING = "delivering" + READY = "ready" + ERROR = "error" + EXPIRING = "expiring" + EXPIRED = "expired" + + def __str__(self) -> str: + return str(self.value) + + +class ServiceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + SERVICE = "service" + ORDER = "order" + + def __str__(self) -> str: + return str(self.value) + + +@dataclass +class OfferAntiDosInfo: + type_: OfferAntiDosInfoType + + +@dataclass +class OfferBackupInfo: + size: int + + +@dataclass +class OfferBandwidthInfo: + speed: int + + +@dataclass +class OfferLicenseInfo: + bound_to_ip: bool + + +@dataclass +class OfferRPNInfo: + speed: int + + +@dataclass +class OfferSANInfo: + size: int + """ + SAN size (in bytes). + """ + + ha: bool + """ + High availabilty offer. + """ + + device_type: OfferSANInfoType + """ + Type of SAN device (hdd / ssd). + """ + + +@dataclass +class OfferStorageInfo: + max_quota: int + + size: int + + +@dataclass +class IP: + ip_id: str + """ + ID of the IP. + """ + + address: str + """ + Address of the IP. + """ + + reverse: str + """ + Reverse IP value. + """ + + version: IPVersion + """ + Version of IP (v4 or v6). + """ + + cidr: int + """ + Classless InterDomain Routing notation of the IP. + """ + + netmask: str + """ + Network mask of IP. + """ + + semantic: IPSemantic + """ + Semantic of IP. + """ + + gateway: str + """ + Gateway of IP. + """ + + status: IPStatus + """ + Status of the IP. + """ + + +@dataclass +class Offer: + id: int + """ + ID of the offer. + """ + + name: str + """ + Name of the offer. + """ + + catalog: OfferCatalog + """ + Catalog of the offer. + """ + + payment_frequency: OfferPaymentFrequency + """ + Payment frequency of the offer. + """ + + pricing: Optional[Money] + """ + Price of the offer. + """ + + server_info: Optional[OfferServerInfo] + + service_level_info: Optional[OfferServiceLevelInfo] + + rpn_info: Optional[OfferRPNInfo] + + san_info: Optional[OfferSANInfo] + + antidos_info: Optional[OfferAntiDosInfo] + + backup_info: Optional[OfferBackupInfo] + + usb_storage_info: Optional[OfferStorageInfo] + + storage_info: Optional[OfferStorageInfo] + + license_info: Optional[OfferLicenseInfo] + + failover_ip_info: Optional[OfferFailoverIpInfo] + + failover_block_info: Optional[OfferFailoverBlockInfo] + + bandwidth_info: Optional[OfferBandwidthInfo] + + +@dataclass +class NetworkInterface: + card_id: int + """ + Card ID of the network interface. + """ + + device_id: int + """ + Device ID of the network interface. + """ + + mac: str + """ + MAC address of the network interface. + """ + + type_: NetworkInterfaceInterfaceType + """ + Network interface type. + """ + + ips: List[IP] + """ + IPs of the network interface. + """ + + +@dataclass +class OS: + id: int + """ + ID of the OS. + """ + + name: str + """ + Name of the OS. + """ + + type_: OSType + """ + Type of the OS. + """ + + version: str + """ + Version of the OS. + """ + + arch: OSArch + """ + Architecture of the OS. + """ + + allow_custom_partitioning: bool + """ + True if the OS allow custom partitioning. + """ + + allow_ssh_keys: bool + """ + True if the OS allow SSH Keys. + """ + + requires_user: bool + """ + True if the OS requires user. 
+ """ + + requires_admin_password: bool + """ + True if the OS requires admin password. + """ + + requires_panel_password: bool + """ + True if the OS requires panel password. + """ + + allowed_filesystems: List[PartitionFileSystem] + """ + True if the OS allow file systems. + """ + + requires_license: bool + """ + True if the OS requires license. + """ + + license_offers: List[Offer] + """ + License offers available with the OS. + """ + + display_name: str + """ + Display name of the OS. + """ + + password_regex: str + """ + Regex used to validate the installation passwords. + """ + + hostname_max_length: int + """ + Hostname max length. + """ + + max_partitions: Optional[int] + """ + Maximum number of partitions which can be created. + """ + + panel_password_regex: Optional[str] + """ + Regex used to validate the panel installation password. + """ + + requires_valid_hostname: Optional[bool] + """ + If both requires_valid_hostname & hostname_regex are set, it means that at least one of the criterias must be valid. + """ + + hostname_regex: Optional[str] + """ + If both requires_valid_hostname & hostname_regex are set, it means that at least one of the criterias must be valid. + """ + + released_at: Optional[datetime] + """ + OS release date. + """ + + +@dataclass +class ServerLocation: + rack: str + + room: str + + datacenter_name: str + + +@dataclass +class ServerOption: + options: List[ServerOption] + + offer: Optional[Offer] + + created_at: Optional[datetime] + + updated_at: Optional[datetime] + + expired_at: Optional[datetime] + + +@dataclass +class ServiceLevel: + offer_id: int + """ + Offer ID of service level. + """ + + level: ServiceLevelLevel + """ + Level type of service level. + """ + + +@dataclass +class RpnSan: + id: int + """ + RPN SAN ID. + """ + + datacenter_name: str + """ + Datacenter location. + """ + + organization_id: str + """ + Organization ID. + """ + + project_id: str + """ + Project ID. + """ + + server_hostname: str + """ + RPN SAN server hostname. + """ + + iqn_suffix: str + """ + IQN suffix. + """ + + offer_id: int + """ + Offer ID. + """ + + created_at: Optional[datetime] + """ + Date of creation of the RPN SAN. + """ + + offer_name: str + """ + Offer description. + """ + + status: RpnSanStatus + """ + Status. + """ + + storage_size: int + """ + RPN SAN storage size. + """ + + iqn: str + + rpnv1_compatible: bool + """ + True if the SAN is compatible with the RPNv1 technology. + """ + + rpnv1_implicit: bool + """ + True if the offer supports the RPNv1 implicitly, false if it must to be added to a group to support RPNv1. + """ + + offer: Optional[Offer] + + delivered_at: Optional[datetime] + """ + RPN SAN delivery date. + """ + + terminated_at: Optional[datetime] + """ + RPN SAN termination date. + """ + + expires_at: Optional[datetime] + """ + RPN SAN expiration date. + """ + + +@dataclass +class RpnGroup: + id: int + """ + Rpn group member ID. + """ + + name: str + """ + Rpn group name. + """ + + type_: RpnGroupType + """ + Rpn group type (local or shared). + """ + + active: bool + """ + Whether the group is active or not. + """ + + owner: str + """ + RPN group owner. + """ + + members_count: int + """ + Total number of members. + """ + + organization_id: str + """ + Rpn group organization ID. + """ + + project_id: str + """ + Rpn group project ID. + """ + + created_at: Optional[datetime] + """ + Rpn group creation date. 
+ """ + + +@dataclass +class RpnV2GroupSubnet: + address: str + + cidr: int + + +@dataclass +class Server: + id: int + """ + ID of the server. + """ + + organization_id: str + """ + Organization ID the server is attached to. + """ + + project_id: str + """ + Project ID the server is attached to. + """ + + hostname: str + """ + Hostname of the server. + """ + + rebooted_at: Optional[datetime] + """ + Date of last reboot of the server. + """ + + status: ServerStatus + """ + Status of the server. + """ + + abuse_contact: str + """ + Abuse contact of the server. + """ + + interfaces: List[NetworkInterface] + """ + Network interfaces of the server. + """ + + zone: Zone + """ + The zone in which is the server. + """ + + options: List[ServerOption] + """ + Options subscribe on the server. + """ + + has_bmc: bool + """ + Boolean if the server has a BMC. + """ + + tags: List[str] + """ + Array of customs tags attached to the server. + """ + + is_outsourced: bool + """ + Whether the server is outsourced or not. + """ + + ipv6_slaac: bool + """ + Whether or not you can enable/disable the IPv6. + """ + + qinq: bool + """ + Whether the server is compatible with QinQ. + """ + + is_rpnv2_member: bool + """ + Whether or not the server is already part of an rpnv2 group. + """ + + created_at: Optional[datetime] + """ + Date of creation of the server. + """ + + updated_at: Optional[datetime] + """ + Date of last modification of the server. + """ + + expired_at: Optional[datetime] + """ + Date of release of the server. + """ + + offer: Optional[Offer] + """ + Offer of the server. + """ + + location: Optional[ServerLocation] + """ + Location of the server. + """ + + os: Optional[OS] + """ + OS installed on the server. + """ + + level: Optional[ServiceLevel] + """ + Service level of the server. + """ + + rescue_os: Optional[OS] + """ + Rescue OS of the server. + """ + + +@dataclass +class FailoverBlock: + id: int + """ + ID of the failover block. + """ + + address: str + """ + IP of the failover block. + """ + + nameservers: List[str] + """ + Name servers. + """ + + ip_version: FailoverBlockVersion + """ + IP version of the failover block. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the failover block. + """ + + netmask: str + """ + Netmask of the failover block. + """ + + gateway_ip: str + """ + Gateway IP of the failover block. + """ + + +@dataclass +class RpnSanIpRpnV2Group: + id: int + + name: str + + +@dataclass +class RpnSanIpServer: + id: int + + hostname: str + + datacenter_name: str + + +@dataclass +class RpnSanServer: + id: int + """ + The RPN SAN server ID. + """ + + datacenter_name: str + """ + The RPN SAN server datacenter name. + """ + + hostname: str + """ + The RPN SAN server hostname. + """ + + sans: List[RpnSan] + """ + RPN SANs linked to the RPN SAN server. + """ + + zone: Zone + """ + The RPN SAN server zone. + """ + + +@dataclass +class RpnV2Group: + id: int + """ + RPN V2 group ID. + """ + + name: str + """ + RPN V2 group name. + """ + + compatible_rpnv1: bool + """ + Whether or not the RPN V1 compatibility was enabled. + """ + + organization_id: str + """ + Organization ID of the RPN V2 group. + """ + + project_id: str + """ + Project ID of the RPN V2 group. + """ + + type_: RpnV2GroupType + """ + RPN V2 group type (qing / standard). + """ + + status: RpnV2GroupStatus + """ + RPN V2 group status. + """ + + owner: str + """ + RPN V2 group owner. + """ + + members_count: int + """ + Total number of members. + """ + + gateway: str + """ + RPN V2 gateway. 
+ """ + + subnet: Optional[RpnV2GroupSubnet] + """ + RPN V2 subnet. + """ + + rpnv1_group: Optional[RpnGroup] + """ + The RPNv1 group (if the compatibility was enabled). + """ + + +@dataclass +class RpnV2Member: + id: int + """ + RPN V2 member ID. + """ + + status: RpnV2MemberStatus + """ + RPN V2 member status. + """ + + vlan: str + """ + RPN V2 member VLAN. + """ + + speed: Optional[int] + """ + RPN speed. + """ + + server: Optional[Server] + + rpnv1_group: Optional[RpnGroup] + + +@dataclass +class ServerDisk: + id: int + + connector: str + + type_: ServerDiskType + + capacity: int + + is_addon: bool + + +@dataclass +class Service: + id: int + """ + ID of the service. + """ + + provisioning_status: ServiceProvisioningStatus + """ + Provisioning status of the service. + """ + + type_: ServiceType + """ + Service type, either order or service. + """ + + resource_id: Optional[int] + """ + Resource ID of the service. + """ + + offer: Optional[Offer] + """ + Offer of the service. + """ + + created_at: Optional[datetime] + """ + Creation date of the service. + """ + + delivered_at: Optional[datetime] + """ + Delivery date of the service. + """ + + terminated_at: Optional[datetime] + """ + Terminatation date of the service. + """ + + expires_at: Optional[datetime] + """ + Expiration date of the service. + """ + + +@dataclass +class GetIPv6BlockQuotasResponseQuota: + quota: int + + cidr: int + + +@dataclass +class InstallPartition: + file_system: PartitionFileSystem + """ + File system of the installation partition. + """ + + raid_level: RaidArrayRaidLevel + """ + RAID level of the installation partition. + """ + + capacity: int + """ + Capacity of the installation partition. + """ + + connectors: List[str] + """ + Connectors of the installation partition. + """ + + mount_point: Optional[str] + """ + Mount point of the installation partition. + """ + + +@dataclass +class FailoverIP: + id: int + """ + ID of the failover IP. + """ + + address: str + """ + IP of the failover IP. + """ + + reverse: str + """ + Reverse IP value. + """ + + ip_version: FailoverIPVersion + """ + IP version of the failover IP. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the failover IP. + """ + + netmask: str + """ + Netmask of the failover IP. + """ + + gateway_ip: str + """ + Gateway IP of the failover IP. + """ + + status: FailoverIPStatus + """ + Status of the IP failover. + """ + + type_: FailoverIPInterfaceType + """ + The interface type. + """ + + mac: Optional[str] + """ + MAC address of the IP failover. + """ + + server_id: Optional[int] + """ + Server ID linked to the IP failover. + """ + + block: Optional[FailoverBlock] + """ + Block of the IP failover. + """ + + server_zone: Optional[str] + """ + The server zone (if assigned). + """ + + +@dataclass +class ListIPv6BlockSubnetsAvailableResponseSubnet: + address: str + + cidr: int + + +@dataclass +class InvoiceSummary: + id: int + + status: InvoiceStatus + + payment_method: InvoicePaymentMethod + + transaction_id: int + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + paid_at: Optional[datetime] + + +@dataclass +class RpnSanIp: + type_: RpnSanIpType + """ + IP type (server | rpnv2_subnet). + """ + + ip: Optional[IP] + """ + An IP object. 
+ """ + + server: Optional[RpnSanIpServer] + + rpnv2_group: Optional[RpnSanIpRpnV2Group] + + +@dataclass +class RefundSummary: + id: int + + status: RefundStatus + + method: RefundMethod + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + refunded_at: Optional[datetime] + + +@dataclass +class RpnGroupMember: + id: int + """ + Rpn group member ID. + """ + + status: RpnGroupMemberStatus + """ + RPN group member status. + """ + + group_id: int + """ + RPN group ID. + """ + + group_name: str + """ + RPN group name. + """ + + group_owner: str + """ + RPN group owner. + """ + + owner: str + """ + RPN member owner. + """ + + san_server: Optional[RpnSanServer] + """ + Authorized RPN SAN server. + """ + + server: Optional[Server] + """ + Authorized rpn v1 capable server. + """ + + speed: Optional[int] + """ + RPN speed. + """ + + +@dataclass +class RpnSanSummary: + id: int + """ + RPN SAN ID. + """ + + datacenter_name: str + """ + Datacenter location. + """ + + organization_id: str + """ + Organization ID. + """ + + project_id: str + """ + Project ID. + """ + + server_hostname: str + """ + RPN SAN server hostname. + """ + + iqn_suffix: str + """ + IQN suffix. + """ + + offer_id: int + """ + Offer ID. + """ + + created_at: Optional[datetime] + """ + Date of creation of the RPN SAN. + """ + + offer_name: str + """ + Offer description. + """ + + status: RpnSanStatus + """ + Status. + """ + + storage_size: int + """ + RPN SAN storage size. + """ + + rpnv1_compatible: bool + """ + True if the SAN is compatible with the RPNv1 technology. + """ + + rpnv1_implicit: bool + """ + True if the offer supports the RPNv1 implicitly, false if it must to be added to a group to support RPNv1. + """ + + delivered_at: Optional[datetime] + """ + RPN SAN delivery date. + """ + + terminated_at: Optional[datetime] + """ + RPN SAN termination date. + """ + + expires_at: Optional[datetime] + """ + RPN SAN expiration date. + """ + + +@dataclass +class RpnServerCapability: + id: int + """ + Server ID. + """ + + hostname: str + """ + Server hostname. + """ + + datacenter_name: str + """ + Server datacenter name. + """ + + zone: Zone + """ + Server zone. + """ + + compatible_qinq: bool + """ + True if server is compatible with QinQ protocol (rpn v2). + """ + + can_join_qinq_group: bool + """ + True if server can join a QinQ group. + """ + + rpnv1_group_count: int + """ + Times server is linked in a rpnv1 group. + """ + + rpnv2_group_count: int + """ + Times server is linked in a rpnv2 group. + """ + + can_join_rpnv2_group: bool + """ + True if server can join an rpnv2 group. + """ + + ip_address: Optional[str] + """ + Private IP address (if rpn compatiblle). + """ + + rpn_version: Optional[int] + """ + Supported rpn version. + """ + + +@dataclass +class Log: + id: int + """ + RPN V2 log ID. + """ + + action: LogAction + """ + Which action was performed. + """ + + status: LogStatus + """ + Action status. + """ + + group: Optional[RpnV2Group] + """ + RPN V2 group. + """ + + member: Optional[RpnV2Member] + """ + RPN V2 member (if appliable). + """ + + created_at: Optional[datetime] + """ + Creation date. + """ + + finished_at: Optional[datetime] + """ + Completion date. + """ + + +@dataclass +class ServerEvent: + event_id: int + """ + ID of the event. + """ + + description: str + """ + Descriptiion of the event. + """ + + date: Optional[datetime] + """ + Date of the event. + """ + + +@dataclass +class ServerSummary: + id: int + """ + ID of the server. 
+ """ + + datacenter_name: str + """ + Datacenter of the server. + """ + + organization_id: str + """ + Organization ID the server is attached to. + """ + + project_id: str + """ + Project ID the server is attached to. + """ + + hostname: str + """ + Hostname of the server. + """ + + created_at: Optional[datetime] + """ + Date of creation of the server. + """ + + updated_at: Optional[datetime] + """ + Date of last modification of the server. + """ + + expired_at: Optional[datetime] + """ + Date of release of the server. + """ + + offer_id: int + """ + Offer ID of the server. + """ + + offer_name: str + """ + Offer name of the server. + """ + + status: ServerStatus + """ + Status of the server. + """ + + interfaces: List[NetworkInterface] + """ + Network interfaces of the server. + """ + + zone: Zone + """ + The zone in which is the server. + """ + + is_outsourced: bool + """ + Whether the server is outsourced or not. + """ + + qinq: bool + """ + Whether the server is compatible with QinQ. + """ + + os_id: Optional[int] + """ + OS ID installed on server. + """ + + level: Optional[ServiceLevel] + """ + Service level of the server. + """ + + rpn_version: Optional[int] + """ + Supported RPN version. + """ + + +@dataclass +class CPU: + name: str + """ + Name of CPU. + """ + + core_count: int + """ + Number of cores of the CPU. + """ + + thread_count: int + """ + Number of threads of the CPU. + """ + + frequency: int + """ + Frequency of the CPU. + """ + + +@dataclass +class Disk: + capacity: int + """ + Capacity of the disk. + """ + + type_: ServerDiskType + """ + Type of the disk. + """ + + +@dataclass +class Memory: + capacity: int + """ + Capacity of the memory. + """ + + type_: MemoryType + """ + Type of the memory. + """ + + frequency: int + """ + Frequency of the memory. + """ + + is_ecc: bool + """ + True if the memory is an error-correcting code memory. + """ + + +@dataclass +class PersistentMemory: + capacity: int + """ + Capacity of the persistent memory. + """ + + frequency: int + """ + Frequency of the persistent memory. + """ + + model: str + """ + Model of the persistent memory. + """ + + +@dataclass +class RaidController: + model: str + """ + Model of the RAID controller. + """ + + raid_level: List[str] + """ + RAID level of the RAID controller. + """ + + +@dataclass +class RaidArray: + raid_level: RaidArrayRaidLevel + """ + The RAID level. + """ + + disks: List[ServerDisk] + """ + Disks on the RAID controller. + """ + + +@dataclass +class Partition: + type_: PartitionType + """ + Type of the partition. + """ + + file_system: PartitionFileSystem + """ + File system of the partition. + """ + + raid_level: RaidArrayRaidLevel + """ + Raid level of the partition. + """ + + capacity: int + """ + Capacity of the partition. + """ + + connectors: List[str] + """ + Connectors of the partition. + """ + + mount_point: Optional[str] + """ + Mount point of the partition. + """ + + +@dataclass +class UpdatableRaidArray: + raid_level: RaidArrayRaidLevel + """ + The RAID level. + """ + + disk_ids: List[int] + """ + The list of Disk ID of the updatable RAID. + """ + + +@dataclass +class AttachFailoverIPToMacAddressRequest: + ip_id: int + """ + ID of the failover IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + type_: Optional[AttachFailoverIPToMacAddressRequestMacType] + """ + A mac type. + """ + + mac: Optional[str] + """ + A valid mac address (existing or not). 
+ """ + + +@dataclass +class AttachFailoverIPsRequest: + server_id: int + """ + ID of the server. + """ + + fips_ids: List[int] + """ + List of ID of failovers IP to attach. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class BMCAccess: + url: str + """ + URL to access to the server console. + """ + + login: str + """ + The login to use for the BMC (Baseboard Management Controller) access authentification. + """ + + password: str + """ + The password to use for the BMC (Baseboard Management Controller) access authentification. + """ + + status: BMCAccessStatus + """ + Status of the connection. + """ + + expires_at: Optional[datetime] + """ + The date after which the BMC (Baseboard Management Controller) access will be closed. + """ + + +@dataclass +class Backup: + id: int + """ + ID of the backup. + """ + + login: str + """ + Login of the backup. + """ + + server: str + """ + Server of the backup. + """ + + status: BackupStatus + """ + Status of the backup. + """ + + acl_enabled: bool + """ + ACL enable boolean of the backup. + """ + + autologin: bool + """ + Autologin boolean of the backup. + """ + + quota_space: int + """ + Total quota space of the backup. + """ + + quota_space_used: int + """ + Quota space used of the backup. + """ + + quota_files: int + """ + Total quota files of the backup. + """ + + quota_files_used: int + """ + Quota files used of the backup. + """ + + +@dataclass +class BillingApiCanOrderRequest: + project_id: Optional[str] + + +@dataclass +class BillingApiDownloadInvoiceRequest: + invoice_id: int + + +@dataclass +class BillingApiDownloadRefundRequest: + refund_id: int + + +@dataclass +class BillingApiGetInvoiceRequest: + invoice_id: int + + +@dataclass +class BillingApiGetRefundRequest: + refund_id: int + + +@dataclass +class BillingApiListInvoicesRequest: + page: Optional[int] + + page_size: Optional[int] + + order_by: Optional[ListInvoicesRequestOrderBy] + + project_id: Optional[str] + + +@dataclass +class BillingApiListRefundsRequest: + page: Optional[int] + + page_size: Optional[int] + + order_by: Optional[ListRefundsRequestOrderBy] + + project_id: Optional[str] + + +@dataclass +class CanOrderResponse: + can_order: bool + + quota_ok: bool + + phone_confirmed: bool + + email_confirmed: bool + + user_confirmed: bool + + payment_mode: bool + + billing_ok: bool + + message: Optional[str] + + +@dataclass +class CancelServerInstallRequest: + server_id: int + """ + Server ID of the server to cancel install. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class CreateFailoverIPsRequest: + offer_id: int + """ + Failover IP offer ID. + """ + + quantity: int + """ + Quantity. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class CreateFailoverIPsResponse: + total_count: int + + services: List[Service] + + +@dataclass +class CreateServerRequest: + offer_id: int + """ + Offer ID of the new server. + """ + + server_option_ids: List[int] + """ + Server option IDs of the new server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID of the new server. + """ + + datacenter_name: Optional[str] + """ + Datacenter name of the new server. 
+ """ + + +@dataclass +class DeleteFailoverIPRequest: + ip_id: int + """ + ID of the failover IP to delete. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DeleteServerRequest: + server_id: int + """ + Server ID to delete. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DeleteServiceRequest: + service_id: int + """ + ID of the service. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DetachFailoverIPFromMacAddressRequest: + ip_id: int + """ + ID of the failover IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class DetachFailoverIPsRequest: + fips_ids: List[int] + """ + List of IDs of failovers IP to detach. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetBMCAccessRequest: + server_id: int + """ + ID of the server to get BMC access. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetFailoverIPRequest: + ip_id: int + """ + ID of the failover IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetIPv6BlockQuotasResponse: + quotas: List[GetIPv6BlockQuotasResponseQuota] + """ + Quota for each CIDR of IPv6 block. + """ + + total_count: int + """ + Total count of quotas. + """ + + +@dataclass +class GetOSRequest: + os_id: int + """ + ID of the OS. + """ + + server_id: int + """ + ID of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class GetOfferRequest: + offer_id: int + """ + ID of offer. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class GetOrderedServiceRequest: + ordered_service_id: int + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetRaidRequest: + server_id: int + """ + ID of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetRemainingQuotaRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class GetRemainingQuotaResponse: + failover_ip_quota: int + """ + Current failover IP quota. + """ + + failover_ip_remaining_quota: int + """ + Remaining failover IP quota. + """ + + failover_block_quota: int + """ + Current failover block quota. + """ + + failover_block_remaining_quota: int + """ + Remaining failover block quota. + """ + + +@dataclass +class GetRescueRequest: + server_id: int + """ + ID of the server to get rescue. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. 
+ """ + + +@dataclass +class GetRpnStatusResponse: + status: GetRpnStatusResponseStatus + """ + If status = 'operational', you can perform rpn actions in write. + """ + + operations_left: Optional[int] + """ + Number of operations left to perform before being operational. + """ + + +@dataclass +class GetServerBackupRequest: + server_id: int + """ + Server ID of the backup. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServerDefaultPartitioningRequest: + server_id: int + """ + ID of the server. + """ + + os_id: int + """ + OS ID of the default partitioning. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServerInstallRequest: + server_id: int + """ + Server ID of the server to install. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServerRequest: + server_id: int + """ + ID of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class GetServiceRequest: + service_id: int + """ + ID of the service. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class IPv6Block: + id: int + """ + ID of the IPv6. + """ + + address: str + """ + Address of the IPv6. + """ + + duid: str + """ + DUID of the IPv6. + """ + + nameservers: List[str] + """ + DNS linked to the IPv6. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the IPv6. + """ + + subnets: List[IPv6Block] + """ + All IPv6 subnets. + """ + + delegation_status: IPv6BlockDelegationStatus + """ + The nameservers delegation status. + """ + + +@dataclass +class IPv6BlockApiCreateIPv6BlockRequest: + project_id: Optional[str] + """ + ID of the project. + """ + + +@dataclass +class IPv6BlockApiCreateIPv6BlockSubnetRequest: + block_id: int + """ + ID of the IPv6 block. + """ + + address: str + """ + Address of the IPv6. + """ + + cidr: int + """ + Classless InterDomain Routing notation of the IPv6. + """ + + +@dataclass +class IPv6BlockApiDeleteIPv6BlockRequest: + block_id: int + """ + ID of the IPv6 block to delete. + """ + + +@dataclass +class IPv6BlockApiGetIPv6BlockQuotasRequest: + project_id: Optional[str] + """ + ID of the project. + """ + + +@dataclass +class IPv6BlockApiGetIPv6BlockRequest: + project_id: Optional[str] + """ + ID of the project. + """ + + +@dataclass +class IPv6BlockApiListIPv6BlockSubnetsAvailableRequest: + block_id: int + """ + ID of the IPv6 block. + """ + + +@dataclass +class IPv6BlockApiUpdateIPv6BlockRequest: + block_id: int + """ + ID of the IPv6 block. + """ + + nameservers: Optional[List[str]] + """ + DNS to link to the IPv6. + """ + + +@dataclass +class InstallServerRequest: + server_id: int + """ + Server ID to install. + """ + + os_id: int + """ + OS ID to install on the server. + """ + + hostname: str + """ + Hostname of the server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + user_login: Optional[str] + """ + User to install on the server. + """ + + user_password: Optional[str] + """ + User password to install on the server. + """ + + panel_password: Optional[str] + """ + Panel password to install on the server. 
+ """ + + root_password: Optional[str] + """ + Root password to install on the server. + """ + + partitions: Optional[List[InstallPartition]] + """ + Partitions to install on the server. + """ + + ssh_key_ids: Optional[List[str]] + """ + SSH key IDs authorized on the server. + """ + + license_offer_id: Optional[int] + """ + Offer ID of license to install on server. + """ + + ip_id: Optional[int] + """ + IP to link at the license to install on server. + """ + + +@dataclass +class Invoice: + id: int + + status: InvoiceStatus + + payment_method: InvoicePaymentMethod + + content: str + + transaction_id: int + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + paid_at: Optional[datetime] + + +@dataclass +class ListFailoverIPsRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of failovers IP per page. + """ + + order_by: Optional[ListFailoverIPsRequestOrderBy] + """ + Order of the failovers IP. + """ + + project_id: Optional[str] + """ + Filter failovers IP by project ID. + """ + + search: Optional[str] + """ + Filter failovers IP which matching with this field. + """ + + only_available: Optional[bool] + """ + True: return all failovers IP not attached on server +false: return all failovers IP attached on server. + """ + + +@dataclass +class ListFailoverIPsResponse: + total_count: int + """ + Total count of matching failovers IP. + """ + + failover_ips: List[FailoverIP] + """ + List of failover IPs that match filters. + """ + + +@dataclass +class ListIPv6BlockSubnetsAvailableResponse: + subnet_availables: List[ListIPv6BlockSubnetsAvailableResponseSubnet] + """ + All available address and CIDR available in subnet. + """ + + total_count: int + """ + Total count of available subnets. + """ + + +@dataclass +class ListInvoicesResponse: + total_count: int + + invoices: List[InvoiceSummary] + + +@dataclass +class ListIpsResponse: + total_count: int + """ + Total count of authorized IPs. + """ + + ips: List[RpnSanIp] + """ + List of authorized IPs. + """ + + +@dataclass +class ListOSRequest: + server_id: int + """ + Filter OS by compatible server ID. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of OS per page. + """ + + order_by: Optional[ListOSRequestOrderBy] + """ + Order of the OS. + """ + + type_: Optional[OSType] + """ + Type of the OS. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class ListOSResponse: + total_count: int + """ + Total count of matching OS. + """ + + os: List[OS] + """ + OS that match filters. + """ + + +@dataclass +class ListOffersRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of offer per page. + """ + + order_by: Optional[ListOffersRequestOrderBy] + """ + Order of the offers. + """ + + commercial_range: Optional[str] + """ + Filter on commercial range. + """ + + catalog: Optional[OfferCatalog] + """ + Filter on catalog. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + is_failover_ip: Optional[bool] + """ + Get the current failover IP offer. 
+ """ + + is_failover_block: Optional[bool] + """ + Get the current failover IP block offer. + """ + + sold_in: Optional[List[str]] + """ + Filter offers depending on their datacenter. + """ + + available_only: Optional[bool] + """ + Set this filter to true to only return available offers. + """ + + is_rpn_san: Optional[bool] + """ + Get the RPN SAN offers. + """ + + +@dataclass +class ListOffersResponse: + total_count: int + """ + Total count of matching offers. + """ + + offers: List[Offer] + """ + Offers that match filters. + """ + + +@dataclass +class ListRefundsResponse: + total_count: int + + refunds: List[RefundSummary] + + +@dataclass +class ListRpnCapableSanServersResponse: + total_count: int + """ + Total count of rpn capable san servers. + """ + + san_servers: List[RpnSanServer] + """ + List of san servers. + """ + + +@dataclass +class ListRpnCapableServersResponse: + total_count: int + """ + Total count of rpn capable servers. + """ + + servers: List[Server] + """ + List of servers. + """ + + +@dataclass +class ListRpnGroupMembersResponse: + total_count: int + """ + Total count of rpn v1 group members. + """ + + members: List[RpnGroupMember] + """ + List of rpn v1 group members. + """ + + +@dataclass +class ListRpnGroupsResponse: + total_count: int + """ + Total count of rpn groups. + """ + + rpn_groups: List[RpnGroup] + """ + List of rpn v1 groups. + """ + + +@dataclass +class ListRpnInvitesResponse: + total_count: int + """ + Total count of invites. + """ + + members: List[RpnGroupMember] + """ + List of invites. + """ + + +@dataclass +class ListRpnSansResponse: + total_count: int + """ + Total count of matching RPN SANs. + """ + + rpn_sans: List[RpnSanSummary] + """ + List of RPN SANs that match filters. + """ + + +@dataclass +class ListRpnServerCapabilitiesResponse: + total_count: int + """ + Total count of servers. + """ + + servers: List[RpnServerCapability] + """ + List of servers and their RPN capabilities. + """ + + +@dataclass +class ListRpnV2CapableResourcesResponse: + total_count: int + """ + Total count of matching rpn v2 capable resources. + """ + + servers: List[Server] + """ + List of rpn v2 capable resources that match filters. + """ + + +@dataclass +class ListRpnV2GroupLogsResponse: + total_count: int + """ + Total count of matching rpn v2 logs. + """ + + logs: List[Log] + """ + List of rpn v2 logs that match filters. + """ + + +@dataclass +class ListRpnV2GroupsResponse: + total_count: int + """ + Total count of matching rpn v2 groups. + """ + + rpn_groups: List[RpnV2Group] + """ + List of rpn v2 groups that match filters. + """ + + +@dataclass +class ListRpnV2MembersResponse: + total_count: int + """ + Total count of matching rpn v2 group members. + """ + + members: List[RpnV2Member] + """ + List of rpn v2 group members that match filters. + """ + + +@dataclass +class ListServerDisksRequest: + server_id: int + """ + Server ID of the server disks. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of server disk per page. + """ + + order_by: Optional[ListServerDisksRequestOrderBy] + """ + Order of the server disks. + """ + + +@dataclass +class ListServerDisksResponse: + total_count: int + """ + Total count of matching server disks. + """ + + disks: List[ServerDisk] + """ + Server disks that match filters. 
+ """ + + +@dataclass +class ListServerEventsRequest: + server_id: int + """ + Server ID of the server events. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of server event per page. + """ + + order_by: Optional[ListServerEventsRequestOrderBy] + """ + Order of the server events. + """ + + +@dataclass +class ListServerEventsResponse: + total_count: int + """ + Total count of matching server events. + """ + + events: List[ServerEvent] + """ + Server events that match filters. + """ + + +@dataclass +class ListServersRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of server per page. + """ + + order_by: Optional[ListServersRequestOrderBy] + """ + Order of the servers. + """ + + project_id: Optional[str] + """ + Filter servers by project ID. + """ + + search: Optional[str] + """ + Filter servers by hostname. + """ + + +@dataclass +class ListServersResponse: + total_count: int + """ + Total count of matching servers. + """ + + servers: List[ServerSummary] + """ + Servers that match filters. + """ + + +@dataclass +class ListServicesRequest: + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of service per page. + """ + + order_by: Optional[ListServicesRequestOrderBy] + """ + Order of the services. + """ + + project_id: Optional[str] + """ + Project ID. + """ + + +@dataclass +class ListServicesResponse: + total_count: int + """ + Total count of matching services. + """ + + services: List[Service] + """ + Services that match filters. + """ + + +@dataclass +class ListSubscribableServerOptionsRequest: + server_id: int + """ + Server ID of the subscribable server options. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of subscribable server option per page. + """ + + +@dataclass +class ListSubscribableServerOptionsResponse: + total_count: int + """ + Total count of matching subscribable server options. + """ + + server_options: List[Offer] + """ + Server options that match filters. 
+ """ + + +@dataclass +class OfferFailoverBlockInfo: + onetime_fees: Optional[Offer] + + +@dataclass +class OfferFailoverIpInfo: + onetime_fees: Optional[Offer] + + +@dataclass +class OfferServerInfo: + bandwidth: int + + stock: OfferServerInfoStock + + commercial_range: str + + disks: List[Disk] + + cpus: List[CPU] + + memories: List[Memory] + + persistent_memories: List[PersistentMemory] + + raid_controllers: List[RaidController] + + available_options: List[Offer] + + connectivity: int + + stock_by_datacenter: Dict[str, OfferServerInfoStock] + + rpn_version: Optional[int] + + onetime_fees: Optional[Offer] + + +@dataclass +class OfferServiceLevelInfo: + support_ticket: bool + + support_phone: bool + + sales_support: bool + + git: str + + sla: float + + priority_support: bool + + high_rpn_bandwidth: bool + + customization: bool + + antidos: bool + + extra_failover_quota: int + + available_options: List[Offer] + + +@dataclass +class Raid: + raid_arrays: List[RaidArray] + """ + Details about the RAID controller. + """ + + +@dataclass +class RebootServerRequest: + server_id: int + """ + Server ID to reboot. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class Refund: + id: int + + status: RefundStatus + + method: RefundMethod + + content: str + + total_with_taxes: Optional[Money] + + total_without_taxes: Optional[Money] + + created_at: Optional[datetime] + + refunded_at: Optional[datetime] + + +@dataclass +class Rescue: + os_id: int + """ + OS ID of the rescue. + """ + + login: str + """ + Login of the rescue. + """ + + password: str + """ + Password of the rescue. + """ + + protocol: RescueProtocol + """ + Protocol of the resuce. + """ + + +@dataclass +class RpnApiGetRpnStatusRequest: + project_id: Optional[str] + """ + A project ID. + """ + + rpnv1_group_id: Optional[int] + """ + An RPN v1 group ID. + """ + + rpnv2_group_id: Optional[int] + """ + An RPN v2 group ID. + """ + + +@dataclass +class RpnApiListRpnServerCapabilitiesRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of servers per page. + """ + + order_by: Optional[ListRpnServerCapabilitiesRequestOrderBy] + """ + Order of the servers. + """ + + project_id: Optional[str] + """ + Filter servers by project ID. + """ + + +@dataclass +class RpnSanApiAddIpRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + ip_ids: List[int] + """ + An array of IP ID. + """ + + +@dataclass +class RpnSanApiCreateRpnSanRequest: + offer_id: int + """ + Offer ID. + """ + + project_id: Optional[str] + """ + Your project ID. + """ + + +@dataclass +class RpnSanApiDeleteRpnSanRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + +@dataclass +class RpnSanApiGetRpnSanRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + +@dataclass +class RpnSanApiListAvailableIpsRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + type_: Optional[RpnSanIpType] + """ + Filter by IP type (server | rpnv2_subnet). + """ + + +@dataclass +class RpnSanApiListIpsRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + type_: Optional[RpnSanIpType] + """ + Filter by IP type (server | rpnv2_subnet). + """ + + +@dataclass +class RpnSanApiListRpnSansRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of RPN SANs per page. + """ + + order_by: Optional[ListRpnSansRequestOrderBy] + """ + Order of the RPN SANs. + """ + + project_id: Optional[str] + """ + Filter RPN SANs by project ID. 
+ """ + + +@dataclass +class RpnSanApiRemoveIpRequest: + rpn_san_id: int + """ + RPN SAN ID. + """ + + ip_ids: List[int] + """ + An array of IP ID. + """ + + +@dataclass +class RpnV1ApiAcceptRpnInviteRequest: + member_id: int + """ + The member ID. + """ + + +@dataclass +class RpnV1ApiAddRpnGroupMembersRequest: + group_id: int + """ + The rpn v1 group ID. + """ + + server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable server IDs. + """ + + san_server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable RPN SAN server IDs. + """ + + +@dataclass +class RpnV1ApiCreateRpnGroupRequest: + name: str + """ + Rpn v1 group name. + """ + + server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable servers. + """ + + san_server_ids: Optional[List[int]] + """ + A collection of rpn v1 capable rpn sans servers. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiDeleteRpnGroupMembersRequest: + group_id: int + """ + The rpn v1 group ID. + """ + + member_ids: List[int] + """ + A collection of rpn v1 group members IDs. + """ + + +@dataclass +class RpnV1ApiDeleteRpnGroupRequest: + group_id: int + """ + Rpn v1 group ID. + """ + + +@dataclass +class RpnV1ApiGetRpnGroupRequest: + group_id: int + """ + Rpn v1 group ID. + """ + + +@dataclass +class RpnV1ApiLeaveRpnGroupRequest: + group_id: int + """ + The RPN V1 group ID. + """ + + member_ids: List[int] + """ + A collection of rpn v1 group members IDs. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiListRpnCapableSanServersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn capable resources per page. + """ + + order_by: Optional[ListRpnCapableSanServersRequestOrderBy] + """ + Order of the rpn capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn capable resources by project ID. + """ + + +@dataclass +class RpnV1ApiListRpnCapableServersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn capable resources per page. + """ + + order_by: Optional[ListRpnCapableServersRequestOrderBy] + """ + Order of the rpn capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn capable resources by project ID. + """ + + +@dataclass +class RpnV1ApiListRpnGroupMembersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v1 group members per page. + """ + + order_by: Optional[ListRpnGroupMembersRequestOrderBy] + """ + Order of the rpn v1 group members. + """ + + group_id: int + """ + Filter rpn v1 group members by group ID. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiListRpnGroupsRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v1 groups per page. + """ + + order_by: Optional[ListRpnGroupsRequestOrderBy] + """ + Order of the rpn v1 groups. + """ + + project_id: Optional[str] + """ + Filter rpn v1 groups by project ID. + """ + + +@dataclass +class RpnV1ApiListRpnInvitesRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn capable resources per page. + """ + + order_by: Optional[ListRpnInvitesRequestOrderBy] + """ + Order of the rpn capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn capable resources by project ID. 
+ """ + + +@dataclass +class RpnV1ApiRefuseRpnInviteRequest: + member_id: int + """ + The member ID. + """ + + +@dataclass +class RpnV1ApiRpnGroupInviteRequest: + group_id: int + """ + The RPN V1 group ID. + """ + + server_ids: List[int] + """ + A collection of external server IDs. + """ + + project_id: Optional[str] + """ + A project ID. + """ + + +@dataclass +class RpnV1ApiUpdateRpnGroupNameRequest: + group_id: int + """ + Rpn v1 group ID. + """ + + name: Optional[str] + """ + New rpn v1 group name. + """ + + +@dataclass +class RpnV2ApiAddRpnV2MembersRequest: + group_id: int + """ + RPN V2 group ID. + """ + + servers: List[int] + """ + A collection of server IDs. + """ + + +@dataclass +class RpnV2ApiCreateRpnV2GroupRequest: + name: str + """ + RPN V2 group name. + """ + + servers: List[int] + """ + A collection of server IDs. + """ + + project_id: Optional[str] + """ + Project ID of the RPN V2 group. + """ + + type_: Optional[RpnV2GroupType] + """ + RPN V2 group type (qing / standard). + """ + + +@dataclass +class RpnV2ApiDeleteRpnV2GroupRequest: + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiDeleteRpnV2MembersRequest: + group_id: int + """ + RPN V2 group ID. + """ + + member_ids: List[int] + """ + A collection of member IDs. + """ + + +@dataclass +class RpnV2ApiDisableRpnV2GroupCompatibilityRequest: + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiEnableRpnV2GroupCompatibilityRequest: + group_id: int + """ + RPN V2 group ID. + """ + + rpnv1_group_id: int + """ + RPN V1 group ID. + """ + + +@dataclass +class RpnV2ApiGetRpnV2GroupRequest: + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2CapableResourcesRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 capable resources per page. + """ + + order_by: Optional[ListRpnV2CapableResourcesRequestOrderBy] + """ + Order of the rpn v2 capable resources. + """ + + project_id: Optional[str] + """ + Filter rpn v2 capable resources by project ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2GroupLogsRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 group logs per page. + """ + + order_by: Optional[ListRpnV2GroupLogsRequestOrderBy] + """ + Order of the rpn v2 group logs. + """ + + group_id: int + """ + RPN V2 group ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2GroupsRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 groups per page. + """ + + order_by: Optional[ListRpnV2GroupsRequestOrderBy] + """ + Order of the rpn v2 groups. + """ + + project_id: Optional[str] + """ + Filter rpn v2 groups by project ID. + """ + + +@dataclass +class RpnV2ApiListRpnV2MembersRequest: + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Number of rpn v2 group members per page. + """ + + order_by: Optional[ListRpnV2MembersRequestOrderBy] + """ + Order of the rpn v2 group members. + """ + + group_id: int + """ + RPN V2 group ID. + """ + + type_: Optional[ListRpnV2MembersRequestType] + """ + Filter members by type. + """ + + +@dataclass +class RpnV2ApiUpdateRpnV2GroupNameRequest: + group_id: int + """ + RPN V2 group ID. + """ + + name: Optional[str] + """ + RPN V2 group name. + """ + + +@dataclass +class RpnV2ApiUpdateRpnV2VlanForMembersRequest: + group_id: int + """ + RPN V2 group ID. + """ + + member_ids: List[int] + """ + RPN V2 member IDs. 
+ """ + + vlan: Optional[int] + """ + Min: 0. +Max: 3967. + """ + + +@dataclass +class ServerDefaultPartitioning: + partitions: List[Partition] + """ + Default partitions. + """ + + +@dataclass +class ServerInstall: + os_id: int + + hostname: str + + partitions: List[Partition] + + ssh_key_ids: List[str] + + status: ServerInstallStatus + + user_login: Optional[str] + + panel_url: Optional[str] + + +@dataclass +class StartBMCAccessRequest: + server_id: int + """ + ID of the server to start the BMC access. + """ + + ip: str + """ + The IP authorized to connect to the given server. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StartRescueRequest: + server_id: int + """ + ID of the server to start rescue. + """ + + os_id: int + """ + OS ID to use to start rescue. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StartServerRequest: + server_id: int + """ + Server ID to start. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StopBMCAccessRequest: + server_id: int + """ + ID of the server to stop BMC access. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StopRescueRequest: + server_id: int + """ + ID of the server to stop rescue. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class StopServerRequest: + server_id: int + """ + Server ID to stop. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class SubscribeServerOptionRequest: + server_id: int + """ + Server ID to subscribe server option. + """ + + option_id: int + """ + Option ID to subscribe. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class SubscribeStorageOptionsRequest: + server_id: int + """ + Server ID of the storage options to subscribe. + """ + + options_ids: List[int] + """ + Option IDs of the storage options to subscribe. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class SubscribeStorageOptionsResponse: + services: List[Service] + """ + Services subscribe storage options. + """ + + +@dataclass +class UpdateRaidRequest: + server_id: int + """ + ID of the server. + """ + + raid_arrays: List[UpdatableRaidArray] + """ + RAIDs to update. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class UpdateReverseRequest: + ip_id: int + """ + ID of the IP. + """ + + reverse: str + """ + Reverse to apply on the IP. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + +@dataclass +class UpdateServerBackupRequest: + server_id: int + """ + Server ID to update backup. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + password: Optional[str] + """ + Password of the server backup. + """ + + autologin: Optional[bool] + """ + Autologin of the server backup. 
+ """ + + acl_enabled: Optional[bool] + """ + Boolean to enable or disable ACL. + """ + + +@dataclass +class UpdateServerRequest: + server_id: int + """ + Server ID to update. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + hostname: Optional[str] + """ + Hostname of the server to update. + """ + + enable_ipv6: Optional[bool] + """ + Flag to enable or not the IPv6 of server. + """ + + +@dataclass +class UpdateServerTagsRequest: + server_id: int + """ + Server ID to update the tags. + """ + + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + + tags: Optional[List[str]] + """ + Tags of server to update. + """ From f8a3a6e550cace91050b5a6c693fcfc3c7d378c6 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 4 Apr 2024 17:13:26 +0200 Subject: [PATCH 11/25] doc(secret_manager): reword CreateSecret description (#482) --- scaleway-async/scaleway_async/secret/v1beta1/api.py | 2 +- scaleway/scaleway/secret/v1beta1/api.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scaleway-async/scaleway_async/secret/v1beta1/api.py b/scaleway-async/scaleway_async/secret/v1beta1/api.py index b3e64a57c..7b8be5097 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/api.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/api.py @@ -68,7 +68,7 @@ async def create_secret( ) -> Secret: """ Create a secret. - You must specify the `region` to create a secret. + Create a secret in a given region specified by the `region` parameter. :param name: Name of the secret. :param protected: A protected secret cannot be deleted. :param region: Region to target. If none is passed will use default region from the config. diff --git a/scaleway/scaleway/secret/v1beta1/api.py b/scaleway/scaleway/secret/v1beta1/api.py index f7c2eba91..c23fac71a 100644 --- a/scaleway/scaleway/secret/v1beta1/api.py +++ b/scaleway/scaleway/secret/v1beta1/api.py @@ -68,7 +68,7 @@ def create_secret( ) -> Secret: """ Create a secret. - You must specify the `region` to create a secret. + Create a secret in a given region specified by the `region` parameter. :param name: Name of the secret. :param protected: A protected secret cannot be deleted. :param region: Region to target. If none is passed will use default region from the config. From ca1835c436f09c1da08888bf73f6883664961d24 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 4 Apr 2024 17:14:43 +0200 Subject: [PATCH 12/25] docs(serverless_jobs): add documentation for command, timezone and schedule fields (#483) --- scaleway-async/scaleway_async/jobs/v1alpha1/api.py | 2 +- scaleway-async/scaleway_async/jobs/v1alpha1/types.py | 8 +++++++- scaleway/scaleway/jobs/v1alpha1/api.py | 2 +- scaleway/scaleway/jobs/v1alpha1/types.py | 8 +++++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/scaleway-async/scaleway_async/jobs/v1alpha1/api.py b/scaleway-async/scaleway_async/jobs/v1alpha1/api.py index 3260c1c59..3515d6191 100644 --- a/scaleway-async/scaleway_async/jobs/v1alpha1/api.py +++ b/scaleway-async/scaleway_async/jobs/v1alpha1/api.py @@ -64,7 +64,7 @@ async def create_job_definition( :param cpu_limit: CPU limit of the job. :param memory_limit: Memory limit of the job (in MiB). :param image_uri: Image to use for the job. - :param command: Startup command. + :param command: Startup command. If empty or not defined, the image's default command is used. :param description: Description of the job. :param region: Region to target. 
If none is passed will use default region from the config. :param name: Name of the job definition. diff --git a/scaleway-async/scaleway_async/jobs/v1alpha1/types.py b/scaleway-async/scaleway_async/jobs/v1alpha1/types.py index 63579d2d3..9476080d7 100644 --- a/scaleway-async/scaleway_async/jobs/v1alpha1/types.py +++ b/scaleway-async/scaleway_async/jobs/v1alpha1/types.py @@ -48,8 +48,14 @@ def __str__(self) -> str: @dataclass class CronSchedule: schedule: str + """ + UNIX cron schedule to run job (e.g., '* * * * *'). + """ timezone: str + """ + Timezone for the cron schedule, in tz database format (e.g., 'Europe/Paris'). + """ @dataclass @@ -159,7 +165,7 @@ class CreateJobDefinitionRequest: command: str """ - Startup command. + Startup command. If empty or not defined, the image's default command is used. """ description: str diff --git a/scaleway/scaleway/jobs/v1alpha1/api.py b/scaleway/scaleway/jobs/v1alpha1/api.py index 41cf14439..30f97a32b 100644 --- a/scaleway/scaleway/jobs/v1alpha1/api.py +++ b/scaleway/scaleway/jobs/v1alpha1/api.py @@ -64,7 +64,7 @@ def create_job_definition( :param cpu_limit: CPU limit of the job. :param memory_limit: Memory limit of the job (in MiB). :param image_uri: Image to use for the job. - :param command: Startup command. + :param command: Startup command. If empty or not defined, the image's default command is used. :param description: Description of the job. :param region: Region to target. If none is passed will use default region from the config. :param name: Name of the job definition. diff --git a/scaleway/scaleway/jobs/v1alpha1/types.py b/scaleway/scaleway/jobs/v1alpha1/types.py index 63579d2d3..9476080d7 100644 --- a/scaleway/scaleway/jobs/v1alpha1/types.py +++ b/scaleway/scaleway/jobs/v1alpha1/types.py @@ -48,8 +48,14 @@ def __str__(self) -> str: @dataclass class CronSchedule: schedule: str + """ + UNIX cron schedule to run job (e.g., '* * * * *'). + """ timezone: str + """ + Timezone for the cron schedule, in tz database format (e.g., 'Europe/Paris'). + """ @dataclass @@ -159,7 +165,7 @@ class CreateJobDefinitionRequest: command: str """ - Startup command. + Startup command. If empty or not defined, the image's default command is used. 
""" description: str From cd4069368f5cd23d8421168822796a63579c69f3 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Fri, 5 Apr 2024 16:42:01 +0200 Subject: [PATCH 13/25] feat(vpc/v2): allow routing activation on existing VPCs (#484) --- .../scaleway_async/vpc/v2/__init__.py | 2 ++ scaleway-async/scaleway_async/vpc/v2/api.py | 35 +++++++++++++++++++ scaleway-async/scaleway_async/vpc/v2/types.py | 10 ++++++ scaleway/scaleway/vpc/v2/__init__.py | 2 ++ scaleway/scaleway/vpc/v2/api.py | 35 +++++++++++++++++++ scaleway/scaleway/vpc/v2/types.py | 10 ++++++ 6 files changed, 94 insertions(+) diff --git a/scaleway-async/scaleway_async/vpc/v2/__init__.py b/scaleway-async/scaleway_async/vpc/v2/__init__.py index 9ab28205a..0c334a81c 100644 --- a/scaleway-async/scaleway_async/vpc/v2/__init__.py +++ b/scaleway-async/scaleway_async/vpc/v2/__init__.py @@ -14,6 +14,7 @@ from .types import DeleteSubnetsResponse from .types import DeleteVPCRequest from .types import EnableDHCPRequest +from .types import EnableRoutingRequest from .types import GetPrivateNetworkRequest from .types import GetVPCRequest from .types import ListPrivateNetworksRequest @@ -42,6 +43,7 @@ "DeleteSubnetsResponse", "DeleteVPCRequest", "EnableDHCPRequest", + "EnableRoutingRequest", "GetPrivateNetworkRequest", "GetVPCRequest", "ListPrivateNetworksRequest", diff --git a/scaleway-async/scaleway_async/vpc/v2/api.py b/scaleway-async/scaleway_async/vpc/v2/api.py index 226f46f19..af19dbdef 100644 --- a/scaleway-async/scaleway_async/vpc/v2/api.py +++ b/scaleway-async/scaleway_async/vpc/v2/api.py @@ -697,6 +697,41 @@ async def enable_dhcp( self._throw_on_error(res) return unmarshal_PrivateNetwork(res.json()) + async def enable_routing( + self, + *, + vpc_id: str, + region: Optional[Region] = None, + ) -> VPC: + """ + Enable routing on a VPC. + Enable routing on an existing VPC. Note that you will not be able to deactivate it afterwards. + :param vpc_id: + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`VPC ` + + Usage: + :: + + result = await api.enable_routing( + vpc_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_vpc_id = validate_path_param("vpc_id", vpc_id) + + res = self._request( + "POST", + f"/vpc/v2/regions/{param_region}/vpcs/{param_vpc_id}/enable-routing", + body={}, + ) + + self._throw_on_error(res) + return unmarshal_VPC(res.json()) + async def set_subnets( self, *, diff --git a/scaleway-async/scaleway_async/vpc/v2/types.py b/scaleway-async/scaleway_async/vpc/v2/types.py index c74e7bd31..424bbde12 100644 --- a/scaleway-async/scaleway_async/vpc/v2/types.py +++ b/scaleway-async/scaleway_async/vpc/v2/types.py @@ -320,6 +320,16 @@ class EnableDHCPRequest: """ +@dataclass +class EnableRoutingRequest: + vpc_id: str + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. 
+ """ + + @dataclass class GetPrivateNetworkRequest: private_network_id: str diff --git a/scaleway/scaleway/vpc/v2/__init__.py b/scaleway/scaleway/vpc/v2/__init__.py index 9ab28205a..0c334a81c 100644 --- a/scaleway/scaleway/vpc/v2/__init__.py +++ b/scaleway/scaleway/vpc/v2/__init__.py @@ -14,6 +14,7 @@ from .types import DeleteSubnetsResponse from .types import DeleteVPCRequest from .types import EnableDHCPRequest +from .types import EnableRoutingRequest from .types import GetPrivateNetworkRequest from .types import GetVPCRequest from .types import ListPrivateNetworksRequest @@ -42,6 +43,7 @@ "DeleteSubnetsResponse", "DeleteVPCRequest", "EnableDHCPRequest", + "EnableRoutingRequest", "GetPrivateNetworkRequest", "GetVPCRequest", "ListPrivateNetworksRequest", diff --git a/scaleway/scaleway/vpc/v2/api.py b/scaleway/scaleway/vpc/v2/api.py index 45ee44cf3..a21d5500a 100644 --- a/scaleway/scaleway/vpc/v2/api.py +++ b/scaleway/scaleway/vpc/v2/api.py @@ -697,6 +697,41 @@ def enable_dhcp( self._throw_on_error(res) return unmarshal_PrivateNetwork(res.json()) + def enable_routing( + self, + *, + vpc_id: str, + region: Optional[Region] = None, + ) -> VPC: + """ + Enable routing on a VPC. + Enable routing on an existing VPC. Note that you will not be able to deactivate it afterwards. + :param vpc_id: + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`VPC ` + + Usage: + :: + + result = api.enable_routing( + vpc_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_vpc_id = validate_path_param("vpc_id", vpc_id) + + res = self._request( + "POST", + f"/vpc/v2/regions/{param_region}/vpcs/{param_vpc_id}/enable-routing", + body={}, + ) + + self._throw_on_error(res) + return unmarshal_VPC(res.json()) + def set_subnets( self, *, diff --git a/scaleway/scaleway/vpc/v2/types.py b/scaleway/scaleway/vpc/v2/types.py index c74e7bd31..424bbde12 100644 --- a/scaleway/scaleway/vpc/v2/types.py +++ b/scaleway/scaleway/vpc/v2/types.py @@ -320,6 +320,16 @@ class EnableDHCPRequest: """ +@dataclass +class EnableRoutingRequest: + vpc_id: str + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. 
+ """ + + @dataclass class GetPrivateNetworkRequest: private_network_id: str From cf0f122bed813f6ee8cd7f26e4cc2d8590b26188 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 11 Apr 2024 17:07:09 +0200 Subject: [PATCH 14/25] feat(secret_manager): add secret type messages to simplify deserialization (#486) --- .../scaleway_async/secret/v1beta1/__init__.py | 6 +++ .../scaleway_async/secret/v1beta1/types.py | 54 +++++++++++++++++++ scaleway/scaleway/secret/v1beta1/__init__.py | 6 +++ scaleway/scaleway/secret/v1beta1/types.py | 54 +++++++++++++++++++ 4 files changed, 120 insertions(+) diff --git a/scaleway-async/scaleway_async/secret/v1beta1/__init__.py b/scaleway-async/scaleway_async/secret/v1beta1/__init__.py index b00f3efcf..f11f378fc 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/__init__.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/__init__.py @@ -34,6 +34,9 @@ from .types import ListTagsRequest from .types import ListTagsResponse from .types import ProtectSecretRequest +from .types import SecretTypeBasicCredentials +from .types import SecretTypeDatabaseCredentials +from .types import SecretTypeSSHKey from .types import UnprotectSecretRequest from .types import UpdateSecretRequest from .types import UpdateSecretVersionRequest @@ -74,6 +77,9 @@ "ListTagsRequest", "ListTagsResponse", "ProtectSecretRequest", + "SecretTypeBasicCredentials", + "SecretTypeDatabaseCredentials", + "SecretTypeSSHKey", "UnprotectSecretRequest", "UpdateSecretRequest", "UpdateSecretVersionRequest", diff --git a/scaleway-async/scaleway_async/secret/v1beta1/types.py b/scaleway-async/scaleway_async/secret/v1beta1/types.py index fddeb57a2..cc769f331 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/types.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/types.py @@ -725,6 +725,60 @@ class ProtectSecretRequest: """ +@dataclass +class SecretTypeBasicCredentials: + username: str + """ + The username or identifier associated with the credentials. + """ + + password: str + """ + The password associated with the credentials. + """ + + +@dataclass +class SecretTypeDatabaseCredentials: + engine: str + """ + Supported database engines are: 'postgres', 'mysql', 'other'. + """ + + username: str + """ + The username used to authenticate to the database server. + """ + + password: str + """ + The password used to authenticate to the database server. + """ + + host: str + """ + The hostname or resolvable DNS name of the database server. + """ + + dbname: str + """ + The name of the database to connect to. + """ + + port: str + """ + The port must be an integer ranging from 0 to 65535. + """ + + +@dataclass +class SecretTypeSSHKey: + ssh_private_key: str + """ + The private SSH key. 
+ """ + + @dataclass class UnprotectSecretRequest: secret_id: str diff --git a/scaleway/scaleway/secret/v1beta1/__init__.py b/scaleway/scaleway/secret/v1beta1/__init__.py index b00f3efcf..f11f378fc 100644 --- a/scaleway/scaleway/secret/v1beta1/__init__.py +++ b/scaleway/scaleway/secret/v1beta1/__init__.py @@ -34,6 +34,9 @@ from .types import ListTagsRequest from .types import ListTagsResponse from .types import ProtectSecretRequest +from .types import SecretTypeBasicCredentials +from .types import SecretTypeDatabaseCredentials +from .types import SecretTypeSSHKey from .types import UnprotectSecretRequest from .types import UpdateSecretRequest from .types import UpdateSecretVersionRequest @@ -74,6 +77,9 @@ "ListTagsRequest", "ListTagsResponse", "ProtectSecretRequest", + "SecretTypeBasicCredentials", + "SecretTypeDatabaseCredentials", + "SecretTypeSSHKey", "UnprotectSecretRequest", "UpdateSecretRequest", "UpdateSecretVersionRequest", diff --git a/scaleway/scaleway/secret/v1beta1/types.py b/scaleway/scaleway/secret/v1beta1/types.py index fddeb57a2..cc769f331 100644 --- a/scaleway/scaleway/secret/v1beta1/types.py +++ b/scaleway/scaleway/secret/v1beta1/types.py @@ -725,6 +725,60 @@ class ProtectSecretRequest: """ +@dataclass +class SecretTypeBasicCredentials: + username: str + """ + The username or identifier associated with the credentials. + """ + + password: str + """ + The password associated with the credentials. + """ + + +@dataclass +class SecretTypeDatabaseCredentials: + engine: str + """ + Supported database engines are: 'postgres', 'mysql', 'other'. + """ + + username: str + """ + The username used to authenticate to the database server. + """ + + password: str + """ + The password used to authenticate to the database server. + """ + + host: str + """ + The hostname or resolvable DNS name of the database server. + """ + + dbname: str + """ + The name of the database to connect to. + """ + + port: str + """ + The port must be an integer ranging from 0 to 65535. + """ + + +@dataclass +class SecretTypeSSHKey: + ssh_private_key: str + """ + The private SSH key. + """ + + @dataclass class UnprotectSecretRequest: secret_id: str From a81328507d4f592db9ece6535feaf6760b54d578 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 11 Apr 2024 17:07:37 +0200 Subject: [PATCH 15/25] chore(serverless_jobs): filter job defs and runs by organization_id (#487) --- scaleway-async/scaleway_async/jobs/v1alpha1/api.py | 14 ++++++++++++++ .../scaleway_async/jobs/v1alpha1/types.py | 4 ++++ scaleway/scaleway/jobs/v1alpha1/api.py | 14 ++++++++++++++ scaleway/scaleway/jobs/v1alpha1/types.py | 4 ++++ 4 files changed, 36 insertions(+) diff --git a/scaleway-async/scaleway_async/jobs/v1alpha1/api.py b/scaleway-async/scaleway_async/jobs/v1alpha1/api.py index 3515d6191..b4547eee7 100644 --- a/scaleway-async/scaleway_async/jobs/v1alpha1/api.py +++ b/scaleway-async/scaleway_async/jobs/v1alpha1/api.py @@ -159,6 +159,7 @@ async def list_job_definitions( page_size: Optional[int] = None, order_by: Optional[ListJobDefinitionsRequestOrderBy] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> ListJobDefinitionsResponse: """ List all your job definitions with filters. 
@@ -167,6 +168,7 @@ async def list_job_definitions( :param page_size: :param order_by: :param project_id: + :param organization_id: :return: :class:`ListJobDefinitionsResponse ` Usage: @@ -184,6 +186,8 @@ async def list_job_definitions( f"/serverless-jobs/v1alpha1/regions/{param_region}/job-definitions", params={ "order_by": order_by, + "organization_id": organization_id + or self.client.default_organization_id, "page": page, "page_size": page_size or self.client.default_page_size, "project_id": project_id or self.client.default_project_id, @@ -201,6 +205,7 @@ async def list_job_definitions_all( page_size: Optional[int] = None, order_by: Optional[ListJobDefinitionsRequestOrderBy] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> List[JobDefinition]: """ List all your job definitions with filters. @@ -209,6 +214,7 @@ async def list_job_definitions_all( :param page_size: :param order_by: :param project_id: + :param organization_id: :return: :class:`List[JobDefinition] ` Usage: @@ -227,6 +233,7 @@ async def list_job_definitions_all( "page_size": page_size, "order_by": order_by, "project_id": project_id, + "organization_id": organization_id, }, ) @@ -462,6 +469,7 @@ async def list_job_runs( order_by: Optional[ListJobRunsRequestOrderBy] = None, job_definition_id: Optional[str] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> ListJobRunsResponse: """ List all job runs with filters. @@ -471,6 +479,7 @@ async def list_job_runs( :param order_by: :param job_definition_id: :param project_id: + :param organization_id: :return: :class:`ListJobRunsResponse ` Usage: @@ -489,6 +498,8 @@ async def list_job_runs( params={ "job_definition_id": job_definition_id, "order_by": order_by, + "organization_id": organization_id + or self.client.default_organization_id, "page": page, "page_size": page_size or self.client.default_page_size, "project_id": project_id or self.client.default_project_id, @@ -507,6 +518,7 @@ async def list_job_runs_all( order_by: Optional[ListJobRunsRequestOrderBy] = None, job_definition_id: Optional[str] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> List[JobRun]: """ List all job runs with filters. 
@@ -516,6 +528,7 @@ async def list_job_runs_all( :param order_by: :param job_definition_id: :param project_id: + :param organization_id: :return: :class:`List[JobRun] ` Usage: @@ -535,5 +548,6 @@ async def list_job_runs_all( "order_by": order_by, "job_definition_id": job_definition_id, "project_id": project_id, + "organization_id": organization_id, }, ) diff --git a/scaleway-async/scaleway_async/jobs/v1alpha1/types.py b/scaleway-async/scaleway_async/jobs/v1alpha1/types.py index 9476080d7..8cf9e213a 100644 --- a/scaleway-async/scaleway_async/jobs/v1alpha1/types.py +++ b/scaleway-async/scaleway_async/jobs/v1alpha1/types.py @@ -260,6 +260,8 @@ class ListJobDefinitionsRequest: project_id: Optional[str] + organization_id: Optional[str] + @dataclass class ListJobDefinitionsResponse: @@ -285,6 +287,8 @@ class ListJobRunsRequest: project_id: Optional[str] + organization_id: Optional[str] + @dataclass class ListJobRunsResponse: diff --git a/scaleway/scaleway/jobs/v1alpha1/api.py b/scaleway/scaleway/jobs/v1alpha1/api.py index 30f97a32b..015ad4aa8 100644 --- a/scaleway/scaleway/jobs/v1alpha1/api.py +++ b/scaleway/scaleway/jobs/v1alpha1/api.py @@ -159,6 +159,7 @@ def list_job_definitions( page_size: Optional[int] = None, order_by: Optional[ListJobDefinitionsRequestOrderBy] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> ListJobDefinitionsResponse: """ List all your job definitions with filters. @@ -167,6 +168,7 @@ def list_job_definitions( :param page_size: :param order_by: :param project_id: + :param organization_id: :return: :class:`ListJobDefinitionsResponse ` Usage: @@ -184,6 +186,8 @@ def list_job_definitions( f"/serverless-jobs/v1alpha1/regions/{param_region}/job-definitions", params={ "order_by": order_by, + "organization_id": organization_id + or self.client.default_organization_id, "page": page, "page_size": page_size or self.client.default_page_size, "project_id": project_id or self.client.default_project_id, @@ -201,6 +205,7 @@ def list_job_definitions_all( page_size: Optional[int] = None, order_by: Optional[ListJobDefinitionsRequestOrderBy] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> List[JobDefinition]: """ List all your job definitions with filters. @@ -209,6 +214,7 @@ def list_job_definitions_all( :param page_size: :param order_by: :param project_id: + :param organization_id: :return: :class:`List[JobDefinition] ` Usage: @@ -227,6 +233,7 @@ def list_job_definitions_all( "page_size": page_size, "order_by": order_by, "project_id": project_id, + "organization_id": organization_id, }, ) @@ -462,6 +469,7 @@ def list_job_runs( order_by: Optional[ListJobRunsRequestOrderBy] = None, job_definition_id: Optional[str] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> ListJobRunsResponse: """ List all job runs with filters. 
@@ -471,6 +479,7 @@ def list_job_runs( :param order_by: :param job_definition_id: :param project_id: + :param organization_id: :return: :class:`ListJobRunsResponse ` Usage: @@ -489,6 +498,8 @@ def list_job_runs( params={ "job_definition_id": job_definition_id, "order_by": order_by, + "organization_id": organization_id + or self.client.default_organization_id, "page": page, "page_size": page_size or self.client.default_page_size, "project_id": project_id or self.client.default_project_id, @@ -507,6 +518,7 @@ def list_job_runs_all( order_by: Optional[ListJobRunsRequestOrderBy] = None, job_definition_id: Optional[str] = None, project_id: Optional[str] = None, + organization_id: Optional[str] = None, ) -> List[JobRun]: """ List all job runs with filters. @@ -516,6 +528,7 @@ def list_job_runs_all( :param order_by: :param job_definition_id: :param project_id: + :param organization_id: :return: :class:`List[JobRun] ` Usage: @@ -535,5 +548,6 @@ def list_job_runs_all( "order_by": order_by, "job_definition_id": job_definition_id, "project_id": project_id, + "organization_id": organization_id, }, ) diff --git a/scaleway/scaleway/jobs/v1alpha1/types.py b/scaleway/scaleway/jobs/v1alpha1/types.py index 9476080d7..8cf9e213a 100644 --- a/scaleway/scaleway/jobs/v1alpha1/types.py +++ b/scaleway/scaleway/jobs/v1alpha1/types.py @@ -260,6 +260,8 @@ class ListJobDefinitionsRequest: project_id: Optional[str] + organization_id: Optional[str] + @dataclass class ListJobDefinitionsResponse: @@ -285,6 +287,8 @@ class ListJobRunsRequest: project_id: Optional[str] + organization_id: Optional[str] + @dataclass class ListJobRunsResponse: From ff576948815f5a2d5e912e3782ca9f2f485ae254 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 11 Apr 2024 17:13:50 +0200 Subject: [PATCH 16/25] feat(secret_manager): add AccessSecretVersionByPath endpoint (#491) --- .../scaleway_async/secret/v1beta1/__init__.py | 2 + .../scaleway_async/secret/v1beta1/api.py | 50 +++++++++++++++++++ .../scaleway_async/secret/v1beta1/types.py | 31 ++++++++++++ scaleway/scaleway/secret/v1beta1/__init__.py | 2 + scaleway/scaleway/secret/v1beta1/api.py | 50 +++++++++++++++++++ scaleway/scaleway/secret/v1beta1/types.py | 31 ++++++++++++ 6 files changed, 166 insertions(+) diff --git a/scaleway-async/scaleway_async/secret/v1beta1/__init__.py b/scaleway-async/scaleway_async/secret/v1beta1/__init__.py index f11f378fc..7cc33c694 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/__init__.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/__init__.py @@ -14,6 +14,7 @@ from .types import BrowseSecretsResponseItem from .types import SecretVersion from .types import Secret +from .types import AccessSecretVersionByPathRequest from .types import AccessSecretVersionRequest from .types import AccessSecretVersionResponse from .types import AddSecretOwnerRequest @@ -57,6 +58,7 @@ "BrowseSecretsResponseItem", "SecretVersion", "Secret", + "AccessSecretVersionByPathRequest", "AccessSecretVersionRequest", "AccessSecretVersionResponse", "AddSecretOwnerRequest", diff --git a/scaleway-async/scaleway_async/secret/v1beta1/api.py b/scaleway-async/scaleway_async/secret/v1beta1/api.py index 7b8be5097..0c74b127b 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/api.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/api.py @@ -832,6 +832,56 @@ async def access_secret_version( self._throw_on_error(res) return unmarshal_AccessSecretVersionResponse(res.json()) + async def access_secret_version_by_path( + self, + *, + secret_path: str, + secret_name: str, + 
revision: str, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AccessSecretVersionResponse: + """ + Access a secret's version using the secret's name and path. + Access sensitive data in a secret's version specified by the `region`, `secret_name`, `secret_path` and `revision` parameters. + :param secret_path: Secret's path. + :param secret_name: Secret's name. + :param revision: The first version of the secret is numbered 1, and all subsequent revisions augment by 1. Value can be either: + - an integer (the revision number) + - "latest" (the latest revision) + - "latest_enabled" (the latest enabled revision). + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to target. + :return: :class:`AccessSecretVersionResponse ` + + Usage: + :: + + result = await api.access_secret_version_by_path( + secret_path="example", + secret_name="example", + revision="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_revision = validate_path_param("revision", revision) + + res = self._request( + "GET", + f"/secret-manager/v1beta1/regions/{param_region}/secrets-by-path/versions/{param_revision}/access", + params={ + "project_id": project_id or self.client.default_project_id, + "secret_name": secret_name, + "secret_path": secret_path, + }, + ) + + self._throw_on_error(res) + return unmarshal_AccessSecretVersionResponse(res.json()) + async def enable_secret_version( self, *, diff --git a/scaleway-async/scaleway_async/secret/v1beta1/types.py b/scaleway-async/scaleway_async/secret/v1beta1/types.py index cc769f331..17f547b62 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/types.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/types.py @@ -280,6 +280,37 @@ class Secret: """ +@dataclass +class AccessSecretVersionByPathRequest: + secret_path: str + """ + Secret's path. + """ + + secret_name: str + """ + Secret's name. + """ + + revision: str + """ + The first version of the secret is numbered 1, and all subsequent revisions augment by 1. Value can be either: +- an integer (the revision number) +- "latest" (the latest revision) +- "latest_enabled" (the latest enabled revision). + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project to target. 
+ """ + + @dataclass class AccessSecretVersionRequest: secret_id: str diff --git a/scaleway/scaleway/secret/v1beta1/__init__.py b/scaleway/scaleway/secret/v1beta1/__init__.py index f11f378fc..7cc33c694 100644 --- a/scaleway/scaleway/secret/v1beta1/__init__.py +++ b/scaleway/scaleway/secret/v1beta1/__init__.py @@ -14,6 +14,7 @@ from .types import BrowseSecretsResponseItem from .types import SecretVersion from .types import Secret +from .types import AccessSecretVersionByPathRequest from .types import AccessSecretVersionRequest from .types import AccessSecretVersionResponse from .types import AddSecretOwnerRequest @@ -57,6 +58,7 @@ "BrowseSecretsResponseItem", "SecretVersion", "Secret", + "AccessSecretVersionByPathRequest", "AccessSecretVersionRequest", "AccessSecretVersionResponse", "AddSecretOwnerRequest", diff --git a/scaleway/scaleway/secret/v1beta1/api.py b/scaleway/scaleway/secret/v1beta1/api.py index c23fac71a..d6daf94f8 100644 --- a/scaleway/scaleway/secret/v1beta1/api.py +++ b/scaleway/scaleway/secret/v1beta1/api.py @@ -832,6 +832,56 @@ def access_secret_version( self._throw_on_error(res) return unmarshal_AccessSecretVersionResponse(res.json()) + def access_secret_version_by_path( + self, + *, + secret_path: str, + secret_name: str, + revision: str, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AccessSecretVersionResponse: + """ + Access a secret's version using the secret's name and path. + Access sensitive data in a secret's version specified by the `region`, `secret_name`, `secret_path` and `revision` parameters. + :param secret_path: Secret's path. + :param secret_name: Secret's name. + :param revision: The first version of the secret is numbered 1, and all subsequent revisions augment by 1. Value can be either: + - an integer (the revision number) + - "latest" (the latest revision) + - "latest_enabled" (the latest enabled revision). + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to target. + :return: :class:`AccessSecretVersionResponse ` + + Usage: + :: + + result = api.access_secret_version_by_path( + secret_path="example", + secret_name="example", + revision="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_revision = validate_path_param("revision", revision) + + res = self._request( + "GET", + f"/secret-manager/v1beta1/regions/{param_region}/secrets-by-path/versions/{param_revision}/access", + params={ + "project_id": project_id or self.client.default_project_id, + "secret_name": secret_name, + "secret_path": secret_path, + }, + ) + + self._throw_on_error(res) + return unmarshal_AccessSecretVersionResponse(res.json()) + def enable_secret_version( self, *, diff --git a/scaleway/scaleway/secret/v1beta1/types.py b/scaleway/scaleway/secret/v1beta1/types.py index cc769f331..17f547b62 100644 --- a/scaleway/scaleway/secret/v1beta1/types.py +++ b/scaleway/scaleway/secret/v1beta1/types.py @@ -280,6 +280,37 @@ class Secret: """ +@dataclass +class AccessSecretVersionByPathRequest: + secret_path: str + """ + Secret's path. + """ + + secret_name: str + """ + Secret's name. + """ + + revision: str + """ + The first version of the secret is numbered 1, and all subsequent revisions augment by 1. Value can be either: +- an integer (the revision number) +- "latest" (the latest revision) +- "latest_enabled" (the latest enabled revision). + """ + + region: Optional[Region] + """ + Region to target. 
If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + @dataclass class AccessSecretVersionRequest: secret_id: str From d2a16a1397ee2213c984cd4c718f11de475b4998 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 11 Apr 2024 17:16:29 +0200 Subject: [PATCH 17/25] feat(webhosting): add support for one_time_password (#488) --- .../webhosting/v1alpha1/marshalling.py | 20 +++++++++------ .../webhosting/v1alpha1/types.py | 25 +++++++++++-------- .../webhosting/v1alpha1/marshalling.py | 20 +++++++++------ .../scaleway/webhosting/v1alpha1/types.py | 25 +++++++++++-------- 4 files changed, 54 insertions(+), 36 deletions(-) diff --git a/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py index 108fd7dbd..e6f39d9a3 100644 --- a/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py @@ -102,6 +102,14 @@ def unmarshal_Hosting(data: Any) -> Hosting: if field is not None: args["offer_name"] = field + field = data.get("domain", None) + if field is not None: + args["domain"] = field + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field @@ -114,14 +122,6 @@ def unmarshal_Hosting(data: Any) -> Hosting: if field is not None: args["platform_number"] = field - field = data.get("domain", None) - if field is not None: - args["domain"] = field - - field = data.get("tags", None) - if field is not None: - args["tags"] = field - field = data.get("options", None) if field is not None: args["options"] = ( @@ -160,6 +160,10 @@ def unmarshal_Hosting(data: Any) -> Hosting: if field is not None: args["protected"] = field + field = data.get("one_time_password", None) + if field is not None: + args["one_time_password"] = field + field = data.get("region", None) if field is not None: args["region"] = field diff --git a/scaleway-async/scaleway_async/webhosting/v1alpha1/types.py b/scaleway-async/scaleway_async/webhosting/v1alpha1/types.py index b9650921b..5ad502b05 100644 --- a/scaleway-async/scaleway_async/webhosting/v1alpha1/types.py +++ b/scaleway-async/scaleway_async/webhosting/v1alpha1/types.py @@ -297,29 +297,29 @@ class Hosting: Name of the active offer for the Web Hosting plan. """ - updated_at: Optional[datetime] + domain: str """ - Date on which the Web Hosting plan was last updated. + Main domain associated with the Web Hosting plan. """ - created_at: Optional[datetime] + tags: List[str] """ - Date on which the Web Hosting plan was created. + List of tags associated with the Web Hosting plan. """ - platform_number: Optional[int] + updated_at: Optional[datetime] """ - Number of the host platform. + Date on which the Web Hosting plan was last updated. """ - domain: str + created_at: Optional[datetime] """ - Main domain associated with the Web Hosting plan. + Date on which the Web Hosting plan was created. """ - tags: List[str] + platform_number: Optional[int] """ - List of tags associated with the Web Hosting plan. + Number of the host platform. """ options: List[HostingOption] @@ -367,6 +367,11 @@ class Hosting: Whether the hosting is protected or not. """ + one_time_password: str + """ + One-time-password used for the first login or reset password, empty after first use. 
+ """ + region: Region """ Region where the Web Hosting plan is hosted. diff --git a/scaleway/scaleway/webhosting/v1alpha1/marshalling.py b/scaleway/scaleway/webhosting/v1alpha1/marshalling.py index 108fd7dbd..e6f39d9a3 100644 --- a/scaleway/scaleway/webhosting/v1alpha1/marshalling.py +++ b/scaleway/scaleway/webhosting/v1alpha1/marshalling.py @@ -102,6 +102,14 @@ def unmarshal_Hosting(data: Any) -> Hosting: if field is not None: args["offer_name"] = field + field = data.get("domain", None) + if field is not None: + args["domain"] = field + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field @@ -114,14 +122,6 @@ def unmarshal_Hosting(data: Any) -> Hosting: if field is not None: args["platform_number"] = field - field = data.get("domain", None) - if field is not None: - args["domain"] = field - - field = data.get("tags", None) - if field is not None: - args["tags"] = field - field = data.get("options", None) if field is not None: args["options"] = ( @@ -160,6 +160,10 @@ def unmarshal_Hosting(data: Any) -> Hosting: if field is not None: args["protected"] = field + field = data.get("one_time_password", None) + if field is not None: + args["one_time_password"] = field + field = data.get("region", None) if field is not None: args["region"] = field diff --git a/scaleway/scaleway/webhosting/v1alpha1/types.py b/scaleway/scaleway/webhosting/v1alpha1/types.py index b9650921b..5ad502b05 100644 --- a/scaleway/scaleway/webhosting/v1alpha1/types.py +++ b/scaleway/scaleway/webhosting/v1alpha1/types.py @@ -297,29 +297,29 @@ class Hosting: Name of the active offer for the Web Hosting plan. """ - updated_at: Optional[datetime] + domain: str """ - Date on which the Web Hosting plan was last updated. + Main domain associated with the Web Hosting plan. """ - created_at: Optional[datetime] + tags: List[str] """ - Date on which the Web Hosting plan was created. + List of tags associated with the Web Hosting plan. """ - platform_number: Optional[int] + updated_at: Optional[datetime] """ - Number of the host platform. + Date on which the Web Hosting plan was last updated. """ - domain: str + created_at: Optional[datetime] """ - Main domain associated with the Web Hosting plan. + Date on which the Web Hosting plan was created. """ - tags: List[str] + platform_number: Optional[int] """ - List of tags associated with the Web Hosting plan. + Number of the host platform. """ options: List[HostingOption] @@ -367,6 +367,11 @@ class Hosting: Whether the hosting is protected or not. """ + one_time_password: str + """ + One-time-password used for the first login or reset password, empty after first use. + """ + region: Region """ Region where the Web Hosting plan is hosted. 
From ced132bf2f21c1390cffa00b0631eddeee522d79 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 11 Apr 2024 17:18:33 +0200 Subject: [PATCH 18/25] feat(iam): add support for pretty_name, unit and description in Quotum (#490) --- .../scaleway_async/iam/v1alpha1/marshalling.py | 12 ++++++++++++ .../scaleway_async/iam/v1alpha1/types.py | 15 +++++++++++++++ scaleway/scaleway/iam/v1alpha1/marshalling.py | 12 ++++++++++++ scaleway/scaleway/iam/v1alpha1/types.py | 15 +++++++++++++++ 4 files changed, 54 insertions(+) diff --git a/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py index 83f6650ed..9895c9550 100644 --- a/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py @@ -378,6 +378,18 @@ def unmarshal_Quotum(data: Any) -> Quotum: if field is not None: args["name"] = field + field = data.get("pretty_name", None) + if field is not None: + args["pretty_name"] = field + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + + field = data.get("description", None) + if field is not None: + args["description"] = field + field = data.get("limit", None) if field is not None: args["limit"] = field diff --git a/scaleway-async/scaleway_async/iam/v1alpha1/types.py b/scaleway-async/scaleway_async/iam/v1alpha1/types.py index 7cedeaac2..74ed354ec 100644 --- a/scaleway-async/scaleway_async/iam/v1alpha1/types.py +++ b/scaleway-async/scaleway_async/iam/v1alpha1/types.py @@ -533,6 +533,21 @@ class Quotum: Name of the quota. """ + pretty_name: str + """ + A human-readable name for the quota. + """ + + unit: str + """ + The unit in which the quota is expressed. + """ + + description: str + """ + Details about the quota. + """ + limit: Optional[int] unlimited: Optional[bool] diff --git a/scaleway/scaleway/iam/v1alpha1/marshalling.py b/scaleway/scaleway/iam/v1alpha1/marshalling.py index 83f6650ed..9895c9550 100644 --- a/scaleway/scaleway/iam/v1alpha1/marshalling.py +++ b/scaleway/scaleway/iam/v1alpha1/marshalling.py @@ -378,6 +378,18 @@ def unmarshal_Quotum(data: Any) -> Quotum: if field is not None: args["name"] = field + field = data.get("pretty_name", None) + if field is not None: + args["pretty_name"] = field + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + + field = data.get("description", None) + if field is not None: + args["description"] = field + field = data.get("limit", None) if field is not None: args["limit"] = field diff --git a/scaleway/scaleway/iam/v1alpha1/types.py b/scaleway/scaleway/iam/v1alpha1/types.py index 7cedeaac2..74ed354ec 100644 --- a/scaleway/scaleway/iam/v1alpha1/types.py +++ b/scaleway/scaleway/iam/v1alpha1/types.py @@ -533,6 +533,21 @@ class Quotum: Name of the quota. """ + pretty_name: str + """ + A human-readable name for the quota. + """ + + unit: str + """ + The unit in which the quota is expressed. + """ + + description: str + """ + Details about the quota. 
+ """ + limit: Optional[int] unlimited: Optional[bool] From 4b8099370121d539990ae10ad48b8214d57741f2 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Thu, 11 Apr 2024 17:19:42 +0200 Subject: [PATCH 19/25] docs(sdb): backups are showing up in descending order (#489) --- .../scaleway_async/serverless_sqldb/v1alpha1/api.py | 4 ++-- .../scaleway_async/serverless_sqldb/v1alpha1/types.py | 2 +- scaleway/scaleway/serverless_sqldb/v1alpha1/api.py | 4 ++-- scaleway/scaleway/serverless_sqldb/v1alpha1/types.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/api.py b/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/api.py index 0a07ce5e9..23bbe6d9a 100644 --- a/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/api.py +++ b/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/api.py @@ -437,7 +437,7 @@ async def list_database_backups( ) -> ListDatabaseBackupsResponse: """ List your Serverless SQL Database backups. - List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. + List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in descending order, though this can be modified via the order_by field. :param database_id: Filter by the UUID of the Serverless SQL Database. :param region: Region to target. If none is passed will use default region from the config. :param organization_id: Filter by the UUID of the Scaleway organization. @@ -489,7 +489,7 @@ async def list_database_backups_all( ) -> List[DatabaseBackup]: """ List your Serverless SQL Database backups. - List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. + List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in descending order, though this can be modified via the order_by field. :param database_id: Filter by the UUID of the Serverless SQL Database. :param region: Region to target. If none is passed will use default region from the config. :param organization_id: Filter by the UUID of the Scaleway organization. diff --git a/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/types.py b/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/types.py index d7b885d74..2b2ea4807 100644 --- a/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/types.py +++ b/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/types.py @@ -39,8 +39,8 @@ def __str__(self) -> str: class ListDatabaseBackupsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): - CREATED_AT_ASC = "created_at_asc" CREATED_AT_DESC = "created_at_desc" + CREATED_AT_ASC = "created_at_asc" def __str__(self) -> str: return str(self.value) diff --git a/scaleway/scaleway/serverless_sqldb/v1alpha1/api.py b/scaleway/scaleway/serverless_sqldb/v1alpha1/api.py index 1d2dfdbe2..d23160665 100644 --- a/scaleway/scaleway/serverless_sqldb/v1alpha1/api.py +++ b/scaleway/scaleway/serverless_sqldb/v1alpha1/api.py @@ -435,7 +435,7 @@ def list_database_backups( ) -> ListDatabaseBackupsResponse: """ List your Serverless SQL Database backups. 
- List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. + List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in descending order, though this can be modified via the order_by field. :param database_id: Filter by the UUID of the Serverless SQL Database. :param region: Region to target. If none is passed will use default region from the config. :param organization_id: Filter by the UUID of the Scaleway organization. @@ -487,7 +487,7 @@ def list_database_backups_all( ) -> List[DatabaseBackup]: """ List your Serverless SQL Database backups. - List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. + List all Serverless SQL Database backups for a given Scaleway Project or Database. By default, the backups returned in the list are ordered by creation date in descending order, though this can be modified via the order_by field. :param database_id: Filter by the UUID of the Serverless SQL Database. :param region: Region to target. If none is passed will use default region from the config. :param organization_id: Filter by the UUID of the Scaleway organization. diff --git a/scaleway/scaleway/serverless_sqldb/v1alpha1/types.py b/scaleway/scaleway/serverless_sqldb/v1alpha1/types.py index d7b885d74..2b2ea4807 100644 --- a/scaleway/scaleway/serverless_sqldb/v1alpha1/types.py +++ b/scaleway/scaleway/serverless_sqldb/v1alpha1/types.py @@ -39,8 +39,8 @@ def __str__(self) -> str: class ListDatabaseBackupsRequestOrderBy(str, Enum, metaclass=StrEnumMeta): - CREATED_AT_ASC = "created_at_asc" CREATED_AT_DESC = "created_at_desc" + CREATED_AT_ASC = "created_at_asc" def __str__(self) -> str: return str(self.value) From e7ca7775700522a0db1e7f76307812ff95b27def Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Wed, 17 Apr 2024 15:16:58 +0200 Subject: [PATCH 20/25] fix(secret_manager): change secret type names to avoid conflicts in sdk (#492) --- .../scaleway_async/secret/v1beta1/__init__.py | 12 +-- .../scaleway_async/secret/v1beta1/types.py | 94 +++++++++---------- scaleway/scaleway/secret/v1beta1/__init__.py | 12 +-- scaleway/scaleway/secret/v1beta1/types.py | 94 +++++++++---------- 4 files changed, 106 insertions(+), 106 deletions(-) diff --git a/scaleway-async/scaleway_async/secret/v1beta1/__init__.py b/scaleway-async/scaleway_async/secret/v1beta1/__init__.py index 7cc33c694..8aec6e34e 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/__init__.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/__init__.py @@ -18,10 +18,12 @@ from .types import AccessSecretVersionRequest from .types import AccessSecretVersionResponse from .types import AddSecretOwnerRequest +from .types import BasicCredentials from .types import BrowseSecretsRequest from .types import BrowseSecretsResponse from .types import CreateSecretRequest from .types import CreateSecretVersionRequest +from .types import DatabaseCredentials from .types import DeleteSecretRequest from .types import DeleteSecretVersionRequest from .types import DisableSecretVersionRequest @@ -35,9 +37,7 @@ from .types import ListTagsRequest from .types import ListTagsResponse from .types import ProtectSecretRequest 
-from .types import SecretTypeBasicCredentials -from .types import SecretTypeDatabaseCredentials -from .types import SecretTypeSSHKey +from .types import SSHKey from .types import UnprotectSecretRequest from .types import UpdateSecretRequest from .types import UpdateSecretVersionRequest @@ -62,10 +62,12 @@ "AccessSecretVersionRequest", "AccessSecretVersionResponse", "AddSecretOwnerRequest", + "BasicCredentials", "BrowseSecretsRequest", "BrowseSecretsResponse", "CreateSecretRequest", "CreateSecretVersionRequest", + "DatabaseCredentials", "DeleteSecretRequest", "DeleteSecretVersionRequest", "DisableSecretVersionRequest", @@ -79,9 +81,7 @@ "ListTagsRequest", "ListTagsResponse", "ProtectSecretRequest", - "SecretTypeBasicCredentials", - "SecretTypeDatabaseCredentials", - "SecretTypeSSHKey", + "SSHKey", "UnprotectSecretRequest", "UpdateSecretRequest", "UpdateSecretVersionRequest", diff --git a/scaleway-async/scaleway_async/secret/v1beta1/types.py b/scaleway-async/scaleway_async/secret/v1beta1/types.py index 17f547b62..bd99f7dc3 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/types.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/types.py @@ -378,6 +378,19 @@ class AddSecretOwnerRequest: """ +@dataclass +class BasicCredentials: + username: str + """ + The username or identifier associated with the credentials. + """ + + password: str + """ + The password associated with the credentials. + """ + + @dataclass class BrowseSecretsRequest: prefix: str @@ -506,6 +519,39 @@ class CreateSecretVersionRequest: """ +@dataclass +class DatabaseCredentials: + engine: str + """ + Supported database engines are: 'postgres', 'mysql', 'other'. + """ + + username: str + """ + The username used to authenticate to the database server. + """ + + password: str + """ + The password used to authenticate to the database server. + """ + + host: str + """ + The hostname or resolvable DNS name of the database server. + """ + + dbname: str + """ + The name of the database to connect to. + """ + + port: str + """ + The port must be an integer ranging from 0 to 65535. + """ + + @dataclass class DeleteSecretRequest: secret_id: str @@ -757,53 +803,7 @@ class ProtectSecretRequest: @dataclass -class SecretTypeBasicCredentials: - username: str - """ - The username or identifier associated with the credentials. - """ - - password: str - """ - The password associated with the credentials. - """ - - -@dataclass -class SecretTypeDatabaseCredentials: - engine: str - """ - Supported database engines are: 'postgres', 'mysql', 'other'. - """ - - username: str - """ - The username used to authenticate to the database server. - """ - - password: str - """ - The password used to authenticate to the database server. - """ - - host: str - """ - The hostname or resolvable DNS name of the database server. - """ - - dbname: str - """ - The name of the database to connect to. - """ - - port: str - """ - The port must be an integer ranging from 0 to 65535. - """ - - -@dataclass -class SecretTypeSSHKey: +class SSHKey: ssh_private_key: str """ The private SSH key. 
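# Illustrative sketch with placeholder values, not part of the generated patch:
# after the rename, the payload dataclasses are exported under their shorter
# names and can be constructed directly before being serialized into a secret
# version.
from scaleway_async.secret.v1beta1 import (
    BasicCredentials,
    DatabaseCredentials,
    SSHKey,
)

db_credentials = DatabaseCredentials(
    engine="postgres",
    username="app",
    password="example-password",
    host="db.example.internal",
    dbname="app",
    port="5432",
)
basic_credentials = BasicCredentials(username="admin", password="example-password")
ssh_key = SSHKey(ssh_private_key="-----BEGIN OPENSSH PRIVATE KEY-----\n...")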
diff --git a/scaleway/scaleway/secret/v1beta1/__init__.py b/scaleway/scaleway/secret/v1beta1/__init__.py index 7cc33c694..8aec6e34e 100644 --- a/scaleway/scaleway/secret/v1beta1/__init__.py +++ b/scaleway/scaleway/secret/v1beta1/__init__.py @@ -18,10 +18,12 @@ from .types import AccessSecretVersionRequest from .types import AccessSecretVersionResponse from .types import AddSecretOwnerRequest +from .types import BasicCredentials from .types import BrowseSecretsRequest from .types import BrowseSecretsResponse from .types import CreateSecretRequest from .types import CreateSecretVersionRequest +from .types import DatabaseCredentials from .types import DeleteSecretRequest from .types import DeleteSecretVersionRequest from .types import DisableSecretVersionRequest @@ -35,9 +37,7 @@ from .types import ListTagsRequest from .types import ListTagsResponse from .types import ProtectSecretRequest -from .types import SecretTypeBasicCredentials -from .types import SecretTypeDatabaseCredentials -from .types import SecretTypeSSHKey +from .types import SSHKey from .types import UnprotectSecretRequest from .types import UpdateSecretRequest from .types import UpdateSecretVersionRequest @@ -62,10 +62,12 @@ "AccessSecretVersionRequest", "AccessSecretVersionResponse", "AddSecretOwnerRequest", + "BasicCredentials", "BrowseSecretsRequest", "BrowseSecretsResponse", "CreateSecretRequest", "CreateSecretVersionRequest", + "DatabaseCredentials", "DeleteSecretRequest", "DeleteSecretVersionRequest", "DisableSecretVersionRequest", @@ -79,9 +81,7 @@ "ListTagsRequest", "ListTagsResponse", "ProtectSecretRequest", - "SecretTypeBasicCredentials", - "SecretTypeDatabaseCredentials", - "SecretTypeSSHKey", + "SSHKey", "UnprotectSecretRequest", "UpdateSecretRequest", "UpdateSecretVersionRequest", diff --git a/scaleway/scaleway/secret/v1beta1/types.py b/scaleway/scaleway/secret/v1beta1/types.py index 17f547b62..bd99f7dc3 100644 --- a/scaleway/scaleway/secret/v1beta1/types.py +++ b/scaleway/scaleway/secret/v1beta1/types.py @@ -378,6 +378,19 @@ class AddSecretOwnerRequest: """ +@dataclass +class BasicCredentials: + username: str + """ + The username or identifier associated with the credentials. + """ + + password: str + """ + The password associated with the credentials. + """ + + @dataclass class BrowseSecretsRequest: prefix: str @@ -506,6 +519,39 @@ class CreateSecretVersionRequest: """ +@dataclass +class DatabaseCredentials: + engine: str + """ + Supported database engines are: 'postgres', 'mysql', 'other'. + """ + + username: str + """ + The username used to authenticate to the database server. + """ + + password: str + """ + The password used to authenticate to the database server. + """ + + host: str + """ + The hostname or resolvable DNS name of the database server. + """ + + dbname: str + """ + The name of the database to connect to. + """ + + port: str + """ + The port must be an integer ranging from 0 to 65535. + """ + + @dataclass class DeleteSecretRequest: secret_id: str @@ -757,53 +803,7 @@ class ProtectSecretRequest: @dataclass -class SecretTypeBasicCredentials: - username: str - """ - The username or identifier associated with the credentials. - """ - - password: str - """ - The password associated with the credentials. - """ - - -@dataclass -class SecretTypeDatabaseCredentials: - engine: str - """ - Supported database engines are: 'postgres', 'mysql', 'other'. - """ - - username: str - """ - The username used to authenticate to the database server. 
- """ - - password: str - """ - The password used to authenticate to the database server. - """ - - host: str - """ - The hostname or resolvable DNS name of the database server. - """ - - dbname: str - """ - The name of the database to connect to. - """ - - port: str - """ - The port must be an integer ranging from 0 to 65535. - """ - - -@dataclass -class SecretTypeSSHKey: +class SSHKey: ssh_private_key: str """ The private SSH key. From 9c2a08a836c20e6bbe4824ed338367de2816013d Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Wed, 17 Apr 2024 15:19:43 +0200 Subject: [PATCH 21/25] docs(instance): mark some fields as deprecated following ipmob introduction (#494) --- .../scaleway_async/instance/v1/api.py | 28 +++++++------ .../scaleway_async/instance/v1/marshalling.py | 20 +++++----- .../scaleway_async/instance/v1/types.py | 39 +++++++++++-------- .../instance/v1/types_private.py | 14 +++---- scaleway/scaleway/instance/v1/api.py | 28 +++++++------ scaleway/scaleway/instance/v1/marshalling.py | 20 +++++----- scaleway/scaleway/instance/v1/types.py | 39 +++++++++++-------- .../scaleway/instance/v1/types_private.py | 14 +++---- 8 files changed, 110 insertions(+), 92 deletions(-) diff --git a/scaleway-async/scaleway_async/instance/v1/api.py b/scaleway-async/scaleway_async/instance/v1/api.py index 2ff5c17e8..173b1c1c5 100644 --- a/scaleway-async/scaleway_async/instance/v1/api.py +++ b/scaleway-async/scaleway_async/instance/v1/api.py @@ -366,6 +366,7 @@ async def list_servers( name: Optional[str] = None, private_ip: Optional[str] = None, without_ip: Optional[bool] = None, + with_ip: Optional[str] = None, commercial_type: Optional[str] = None, state: Optional[ServerState] = None, tags: Optional[List[str]] = None, @@ -386,6 +387,7 @@ async def list_servers( :param name: Filter Instances by name (eg. "server1" will return "server100" and "server1" but not "foo"). :param private_ip: List Instances by private_ip. :param without_ip: List Instances that are not attached to a public IP. + :param with_ip: List Instances by IP (both private_ip and public_ip are supported). :param commercial_type: List Instances of this commercial type. :param state: List Instances in this state. :param tags: List Instances with these exact tags (to filter with several tags, use commas to separate them). @@ -424,6 +426,7 @@ async def list_servers( "servers": ",".join(servers) if servers and len(servers) > 0 else None, "state": state, "tags": ",".join(tags) if tags and len(tags) > 0 else None, + "with_ip": with_ip, "without_ip": without_ip, }, ) @@ -442,6 +445,7 @@ async def list_servers_all( name: Optional[str] = None, private_ip: Optional[str] = None, without_ip: Optional[bool] = None, + with_ip: Optional[str] = None, commercial_type: Optional[str] = None, state: Optional[ServerState] = None, tags: Optional[List[str]] = None, @@ -462,6 +466,7 @@ async def list_servers_all( :param name: Filter Instances by name (eg. "server1" will return "server100" and "server1" but not "foo"). :param private_ip: List Instances by private_ip. :param without_ip: List Instances that are not attached to a public IP. + :param with_ip: List Instances by IP (both private_ip and public_ip are supported). :param commercial_type: List Instances of this commercial type. :param state: List Instances in this state. :param tags: List Instances with these exact tags (to filter with several tags, use commas to separate them). 
@@ -491,6 +496,7 @@ async def list_servers_all( "name": name, "private_ip": private_ip, "without_ip": without_ip, + "with_ip": with_ip, "commercial_type": commercial_type, "state": state, "tags": tags, @@ -507,12 +513,12 @@ async def _create_server( *, commercial_type: str, image: str, - enable_ipv6: bool, zone: Optional[Zone] = None, name: Optional[str] = None, dynamic_ip_required: Optional[bool] = None, routed_ip_enabled: Optional[bool] = None, volumes: Optional[Dict[str, VolumeServerTemplate]] = None, + enable_ipv6: Optional[bool] = None, public_ip: Optional[str] = None, public_ips: Optional[List[str]] = None, boot_type: Optional[BootType] = None, @@ -529,12 +535,12 @@ async def _create_server( Get more information in the [Technical Information](#technical-information) section of the introduction. :param commercial_type: Define the Instance commercial type (i.e. GP1-S). :param image: Instance image ID or label. - :param enable_ipv6: True if IPv6 is enabled on the server. :param zone: Zone to target. If none is passed will use default zone from the config. :param name: Instance name. :param dynamic_ip_required: Define if a dynamic IPv4 is required for the Instance. :param routed_ip_enabled: If true, configure the Instance so it uses the new routed IP mode. :param volumes: Volumes attached to the server. + :param enable_ipv6: True if IPv6 is enabled on the server (deprecated and always `False` when `routed_ip_enabled` is `True`). :param public_ip: ID of the reserved IP to attach to the Instance. :param public_ips: A list of reserved IP IDs to attach to the Instance. :param boot_type: Boot type to use. @@ -554,7 +560,6 @@ async def _create_server( result = await api._create_server( commercial_type="example", image="example", - enable_ipv6=False, ) """ @@ -565,14 +570,14 @@ async def _create_server( f"/instance/v1/zones/{param_zone}/servers", body=marshal_CreateServerRequest( CreateServerRequest( - zone=zone, commercial_type=commercial_type, image=image, + zone=zone, name=name or random_name(prefix="srv"), dynamic_ip_required=dynamic_ip_required, routed_ip_enabled=routed_ip_enabled, - enable_ipv6=enable_ipv6, volumes=volumes, + enable_ipv6=enable_ipv6, public_ip=public_ip, public_ips=public_ips, boot_type=boot_type, @@ -660,7 +665,6 @@ async def _set_server( name: str, commercial_type: str, dynamic_ip_required: bool, - enable_ipv6: bool, hostname: str, organization: Optional[str] = None, project: Optional[str] = None, @@ -668,6 +672,7 @@ async def _set_server( tags: Optional[List[str]] = None, creation_date: Optional[datetime] = None, routed_ip_enabled: Optional[bool] = None, + enable_ipv6: Optional[bool] = None, image: Optional[Image] = None, protected: bool, private_ip: Optional[str] = None, @@ -693,7 +698,6 @@ async def _set_server( :param name: Instance name. :param commercial_type: Instance commercial type (eg. GP1-M). :param dynamic_ip_required: True if a dynamic IPv4 is required. - :param enable_ipv6: True if IPv6 is enabled. :param hostname: Instance host name. :param organization: Instance Organization ID. :param project: Instance Project ID. @@ -701,16 +705,17 @@ async def _set_server( :param tags: Tags associated with the Instance. :param creation_date: Instance creation date. :param routed_ip_enabled: True to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). + :param enable_ipv6: True if IPv6 is enabled (deprecated and always `False` when `routed_ip_enabled` is `True`). 
:param image: Provide information on the Instance image. :param protected: Instance protection option is activated. - :param private_ip: Instance private IP address. - :param public_ip: Information about the public IP. + :param private_ip: Instance private IP address (deprecated and always `null` when `routed_ip_enabled` is `True`). + :param public_ip: Information about the public IP (deprecated in favor of `public_ips`). :param public_ips: Information about all the public IPs attached to the server. :param modification_date: Instance modification date. :param state_detail: Instance state_detail. :param state: Instance state. :param location: Instance location. - :param ipv6: Instance IPv6 address. + :param ipv6: Instance IPv6 address (deprecated when `routed_ip_enabled` is `True`). :param bootscript: Instance bootscript. :param boot_type: Instance boot type. :param volumes: Instance volumes. @@ -729,7 +734,6 @@ async def _set_server( name="example", commercial_type="example", dynamic_ip_required=False, - enable_ipv6=False, hostname="example", protected=False, state_detail="example", @@ -749,7 +753,6 @@ async def _set_server( name=name, commercial_type=commercial_type, dynamic_ip_required=dynamic_ip_required, - enable_ipv6=enable_ipv6, hostname=hostname, organization=organization, project=project, @@ -757,6 +760,7 @@ async def _set_server( tags=tags, creation_date=creation_date, routed_ip_enabled=routed_ip_enabled, + enable_ipv6=enable_ipv6, image=image, protected=protected, private_ip=private_ip, diff --git a/scaleway-async/scaleway_async/instance/v1/marshalling.py b/scaleway-async/scaleway_async/instance/v1/marshalling.py index 0d7765d58..e5fc59e05 100644 --- a/scaleway-async/scaleway_async/instance/v1/marshalling.py +++ b/scaleway-async/scaleway_async/instance/v1/marshalling.py @@ -741,10 +741,6 @@ def unmarshal_Server(data: Any) -> Server: if field is not None: args["routed_ip_enabled"] = field - field = data.get("enable_ipv6", None) - if field is not None: - args["enable_ipv6"] = field - field = data.get("hostname", None) if field is not None: args["hostname"] = field @@ -753,6 +749,10 @@ def unmarshal_Server(data: Any) -> Server: if field is not None: args["protected"] = field + field = data.get("enable_ipv6", None) + if field is not None: + args["enable_ipv6"] = field + field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) @@ -2877,15 +2877,15 @@ def marshal_CreateServerRequest( if request.routed_ip_enabled is not None: output["routed_ip_enabled"] = request.routed_ip_enabled - if request.enable_ipv6 is not None: - output["enable_ipv6"] = request.enable_ipv6 - if request.volumes is not None: output["volumes"] = { key: marshal_VolumeServerTemplate(value, defaults) for key, value in request.volumes.items() } + if request.enable_ipv6 is not None: + output["enable_ipv6"] = request.enable_ipv6 + if request.public_ip is not None: output["public_ip"] = request.public_ip @@ -3972,9 +3972,6 @@ def marshal__SetServerRequest( if request.dynamic_ip_required is not None: output["dynamic_ip_required"] = request.dynamic_ip_required - if request.enable_ipv6 is not None: - output["enable_ipv6"] = request.enable_ipv6 - if request.hostname is not None: output["hostname"] = request.hostname @@ -3998,6 +3995,9 @@ def marshal__SetServerRequest( if request.routed_ip_enabled is not None: output["routed_ip_enabled"] = request.routed_ip_enabled + if request.enable_ipv6 is not None: + output["enable_ipv6"] = request.enable_ipv6 + if request.image is not None: 
output["image"] = (marshal_Image(request.image, defaults),) diff --git a/scaleway-async/scaleway_async/instance/v1/types.py b/scaleway-async/scaleway_async/instance/v1/types.py index ce054860d..4ef95d229 100644 --- a/scaleway-async/scaleway_async/instance/v1/types.py +++ b/scaleway-async/scaleway_async/instance/v1/types.py @@ -842,11 +842,6 @@ class Server: True to configure the instance so it uses the new routed IP mode. """ - enable_ipv6: bool - """ - True if IPv6 is enabled. - """ - hostname: str """ Instance host name. @@ -857,6 +852,11 @@ class Server: Defines whether the Instance protection option is activated. """ + enable_ipv6: Optional[bool] + """ + True if IPv6 is enabled (deprecated and always `False` when `routed_ip_enabled` is `True`). + """ + image: Optional[Image] """ Information about the Instance image. @@ -864,12 +864,12 @@ class Server: private_ip: Optional[str] """ - Private IP address of the Instance. + Private IP address of the Instance (deprecated and always `null` when `routed_ip_enabled` is `True`). """ public_ip: Optional[ServerIp] """ - Information about the public IP. + Information about the public IP (deprecated in favor of `public_ips`). """ public_ips: List[ServerIp] @@ -909,7 +909,7 @@ class Server: ipv6: Optional[ServerIpv6] """ - Instance IPv6 address. + Instance IPv6 address (deprecated when `routed_ip_enabled` is `True`). """ bootscript: Optional[Bootscript] @@ -1783,11 +1783,6 @@ class CreateSecurityGroupRuleResponse: @dataclass class CreateServerRequest: - zone: Optional[Zone] - """ - Zone to target. If none is passed will use default zone from the config. - """ - commercial_type: str """ Define the Instance commercial type (i.e. GP1-S). @@ -1798,6 +1793,11 @@ class CreateServerRequest: Instance image ID or label. """ + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + name: Optional[str] """ Instance name. @@ -1813,14 +1813,14 @@ class CreateServerRequest: If true, configure the Instance so it uses the new routed IP mode. """ - enable_ipv6: bool + volumes: Optional[Dict[str, VolumeServerTemplate]] """ - True if IPv6 is enabled on the server. + Volumes attached to the server. """ - volumes: Optional[Dict[str, VolumeServerTemplate]] + enable_ipv6: Optional[bool] """ - Volumes attached to the server. + True if IPv6 is enabled on the server (deprecated and always `False` when `routed_ip_enabled` is `True`). """ public_ip: Optional[str] @@ -2774,6 +2774,11 @@ class ListServersRequest: List Instances that are not attached to a public IP. """ + with_ip: Optional[str] + """ + List Instances by IP (both private_ip and public_ip are supported). + """ + commercial_type: Optional[str] """ List Instances of this commercial type. diff --git a/scaleway-async/scaleway_async/instance/v1/types_private.py b/scaleway-async/scaleway_async/instance/v1/types_private.py index 9acfb25e0..da16440bc 100644 --- a/scaleway-async/scaleway_async/instance/v1/types_private.py +++ b/scaleway-async/scaleway_async/instance/v1/types_private.py @@ -164,10 +164,6 @@ class _SetServerRequest: """ True if a dynamic IPv4 is required. """ - enable_ipv6: bool - """ - True if IPv6 is enabled. - """ hostname: str """ Instance host name. @@ -196,6 +192,10 @@ class _SetServerRequest: """ True to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). 
""" + enable_ipv6: Optional[bool] + """ + True if IPv6 is enabled (deprecated and always `False` when `routed_ip_enabled` is `True`). + """ image: Optional[Image] """ Provide information on the Instance image. @@ -206,11 +206,11 @@ class _SetServerRequest: """ private_ip: Optional[str] """ - Instance private IP address. + Instance private IP address (deprecated and always `null` when `routed_ip_enabled` is `True`). """ public_ip: Optional[ServerIp] """ - Information about the public IP. + Information about the public IP (deprecated in favor of `public_ips`). """ public_ips: Optional[List[ServerIp]] """ @@ -234,7 +234,7 @@ class _SetServerRequest: """ ipv6: Optional[ServerIpv6] """ - Instance IPv6 address. + Instance IPv6 address (deprecated when `routed_ip_enabled` is `True`). """ bootscript: Optional[Bootscript] """ diff --git a/scaleway/scaleway/instance/v1/api.py b/scaleway/scaleway/instance/v1/api.py index 0400e08bb..1615fa455 100644 --- a/scaleway/scaleway/instance/v1/api.py +++ b/scaleway/scaleway/instance/v1/api.py @@ -366,6 +366,7 @@ def list_servers( name: Optional[str] = None, private_ip: Optional[str] = None, without_ip: Optional[bool] = None, + with_ip: Optional[str] = None, commercial_type: Optional[str] = None, state: Optional[ServerState] = None, tags: Optional[List[str]] = None, @@ -386,6 +387,7 @@ def list_servers( :param name: Filter Instances by name (eg. "server1" will return "server100" and "server1" but not "foo"). :param private_ip: List Instances by private_ip. :param without_ip: List Instances that are not attached to a public IP. + :param with_ip: List Instances by IP (both private_ip and public_ip are supported). :param commercial_type: List Instances of this commercial type. :param state: List Instances in this state. :param tags: List Instances with these exact tags (to filter with several tags, use commas to separate them). @@ -424,6 +426,7 @@ def list_servers( "servers": ",".join(servers) if servers and len(servers) > 0 else None, "state": state, "tags": ",".join(tags) if tags and len(tags) > 0 else None, + "with_ip": with_ip, "without_ip": without_ip, }, ) @@ -442,6 +445,7 @@ def list_servers_all( name: Optional[str] = None, private_ip: Optional[str] = None, without_ip: Optional[bool] = None, + with_ip: Optional[str] = None, commercial_type: Optional[str] = None, state: Optional[ServerState] = None, tags: Optional[List[str]] = None, @@ -462,6 +466,7 @@ def list_servers_all( :param name: Filter Instances by name (eg. "server1" will return "server100" and "server1" but not "foo"). :param private_ip: List Instances by private_ip. :param without_ip: List Instances that are not attached to a public IP. + :param with_ip: List Instances by IP (both private_ip and public_ip are supported). :param commercial_type: List Instances of this commercial type. :param state: List Instances in this state. :param tags: List Instances with these exact tags (to filter with several tags, use commas to separate them). 
@@ -491,6 +496,7 @@ def list_servers_all( "name": name, "private_ip": private_ip, "without_ip": without_ip, + "with_ip": with_ip, "commercial_type": commercial_type, "state": state, "tags": tags, @@ -507,12 +513,12 @@ def _create_server( *, commercial_type: str, image: str, - enable_ipv6: bool, zone: Optional[Zone] = None, name: Optional[str] = None, dynamic_ip_required: Optional[bool] = None, routed_ip_enabled: Optional[bool] = None, volumes: Optional[Dict[str, VolumeServerTemplate]] = None, + enable_ipv6: Optional[bool] = None, public_ip: Optional[str] = None, public_ips: Optional[List[str]] = None, boot_type: Optional[BootType] = None, @@ -529,12 +535,12 @@ def _create_server( Get more information in the [Technical Information](#technical-information) section of the introduction. :param commercial_type: Define the Instance commercial type (i.e. GP1-S). :param image: Instance image ID or label. - :param enable_ipv6: True if IPv6 is enabled on the server. :param zone: Zone to target. If none is passed will use default zone from the config. :param name: Instance name. :param dynamic_ip_required: Define if a dynamic IPv4 is required for the Instance. :param routed_ip_enabled: If true, configure the Instance so it uses the new routed IP mode. :param volumes: Volumes attached to the server. + :param enable_ipv6: True if IPv6 is enabled on the server (deprecated and always `False` when `routed_ip_enabled` is `True`). :param public_ip: ID of the reserved IP to attach to the Instance. :param public_ips: A list of reserved IP IDs to attach to the Instance. :param boot_type: Boot type to use. @@ -554,7 +560,6 @@ def _create_server( result = api._create_server( commercial_type="example", image="example", - enable_ipv6=False, ) """ @@ -565,14 +570,14 @@ def _create_server( f"/instance/v1/zones/{param_zone}/servers", body=marshal_CreateServerRequest( CreateServerRequest( - zone=zone, commercial_type=commercial_type, image=image, + zone=zone, name=name or random_name(prefix="srv"), dynamic_ip_required=dynamic_ip_required, routed_ip_enabled=routed_ip_enabled, - enable_ipv6=enable_ipv6, volumes=volumes, + enable_ipv6=enable_ipv6, public_ip=public_ip, public_ips=public_ips, boot_type=boot_type, @@ -660,7 +665,6 @@ def _set_server( name: str, commercial_type: str, dynamic_ip_required: bool, - enable_ipv6: bool, hostname: str, organization: Optional[str] = None, project: Optional[str] = None, @@ -668,6 +672,7 @@ def _set_server( tags: Optional[List[str]] = None, creation_date: Optional[datetime] = None, routed_ip_enabled: Optional[bool] = None, + enable_ipv6: Optional[bool] = None, image: Optional[Image] = None, protected: bool, private_ip: Optional[str] = None, @@ -693,7 +698,6 @@ def _set_server( :param name: Instance name. :param commercial_type: Instance commercial type (eg. GP1-M). :param dynamic_ip_required: True if a dynamic IPv4 is required. - :param enable_ipv6: True if IPv6 is enabled. :param hostname: Instance host name. :param organization: Instance Organization ID. :param project: Instance Project ID. @@ -701,16 +705,17 @@ def _set_server( :param tags: Tags associated with the Instance. :param creation_date: Instance creation date. :param routed_ip_enabled: True to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). + :param enable_ipv6: True if IPv6 is enabled (deprecated and always `False` when `routed_ip_enabled` is `True`). :param image: Provide information on the Instance image. 
:param protected: Instance protection option is activated. - :param private_ip: Instance private IP address. - :param public_ip: Information about the public IP. + :param private_ip: Instance private IP address (deprecated and always `null` when `routed_ip_enabled` is `True`). + :param public_ip: Information about the public IP (deprecated in favor of `public_ips`). :param public_ips: Information about all the public IPs attached to the server. :param modification_date: Instance modification date. :param state_detail: Instance state_detail. :param state: Instance state. :param location: Instance location. - :param ipv6: Instance IPv6 address. + :param ipv6: Instance IPv6 address (deprecated when `routed_ip_enabled` is `True`). :param bootscript: Instance bootscript. :param boot_type: Instance boot type. :param volumes: Instance volumes. @@ -729,7 +734,6 @@ def _set_server( name="example", commercial_type="example", dynamic_ip_required=False, - enable_ipv6=False, hostname="example", protected=False, state_detail="example", @@ -749,7 +753,6 @@ def _set_server( name=name, commercial_type=commercial_type, dynamic_ip_required=dynamic_ip_required, - enable_ipv6=enable_ipv6, hostname=hostname, organization=organization, project=project, @@ -757,6 +760,7 @@ def _set_server( tags=tags, creation_date=creation_date, routed_ip_enabled=routed_ip_enabled, + enable_ipv6=enable_ipv6, image=image, protected=protected, private_ip=private_ip, diff --git a/scaleway/scaleway/instance/v1/marshalling.py b/scaleway/scaleway/instance/v1/marshalling.py index 0d7765d58..e5fc59e05 100644 --- a/scaleway/scaleway/instance/v1/marshalling.py +++ b/scaleway/scaleway/instance/v1/marshalling.py @@ -741,10 +741,6 @@ def unmarshal_Server(data: Any) -> Server: if field is not None: args["routed_ip_enabled"] = field - field = data.get("enable_ipv6", None) - if field is not None: - args["enable_ipv6"] = field - field = data.get("hostname", None) if field is not None: args["hostname"] = field @@ -753,6 +749,10 @@ def unmarshal_Server(data: Any) -> Server: if field is not None: args["protected"] = field + field = data.get("enable_ipv6", None) + if field is not None: + args["enable_ipv6"] = field + field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) @@ -2877,15 +2877,15 @@ def marshal_CreateServerRequest( if request.routed_ip_enabled is not None: output["routed_ip_enabled"] = request.routed_ip_enabled - if request.enable_ipv6 is not None: - output["enable_ipv6"] = request.enable_ipv6 - if request.volumes is not None: output["volumes"] = { key: marshal_VolumeServerTemplate(value, defaults) for key, value in request.volumes.items() } + if request.enable_ipv6 is not None: + output["enable_ipv6"] = request.enable_ipv6 + if request.public_ip is not None: output["public_ip"] = request.public_ip @@ -3972,9 +3972,6 @@ def marshal__SetServerRequest( if request.dynamic_ip_required is not None: output["dynamic_ip_required"] = request.dynamic_ip_required - if request.enable_ipv6 is not None: - output["enable_ipv6"] = request.enable_ipv6 - if request.hostname is not None: output["hostname"] = request.hostname @@ -3998,6 +3995,9 @@ def marshal__SetServerRequest( if request.routed_ip_enabled is not None: output["routed_ip_enabled"] = request.routed_ip_enabled + if request.enable_ipv6 is not None: + output["enable_ipv6"] = request.enable_ipv6 + if request.image is not None: output["image"] = (marshal_Image(request.image, defaults),) diff --git a/scaleway/scaleway/instance/v1/types.py 
b/scaleway/scaleway/instance/v1/types.py index ce054860d..4ef95d229 100644 --- a/scaleway/scaleway/instance/v1/types.py +++ b/scaleway/scaleway/instance/v1/types.py @@ -842,11 +842,6 @@ class Server: True to configure the instance so it uses the new routed IP mode. """ - enable_ipv6: bool - """ - True if IPv6 is enabled. - """ - hostname: str """ Instance host name. @@ -857,6 +852,11 @@ class Server: Defines whether the Instance protection option is activated. """ + enable_ipv6: Optional[bool] + """ + True if IPv6 is enabled (deprecated and always `False` when `routed_ip_enabled` is `True`). + """ + image: Optional[Image] """ Information about the Instance image. @@ -864,12 +864,12 @@ class Server: private_ip: Optional[str] """ - Private IP address of the Instance. + Private IP address of the Instance (deprecated and always `null` when `routed_ip_enabled` is `True`). """ public_ip: Optional[ServerIp] """ - Information about the public IP. + Information about the public IP (deprecated in favor of `public_ips`). """ public_ips: List[ServerIp] @@ -909,7 +909,7 @@ class Server: ipv6: Optional[ServerIpv6] """ - Instance IPv6 address. + Instance IPv6 address (deprecated when `routed_ip_enabled` is `True`). """ bootscript: Optional[Bootscript] @@ -1783,11 +1783,6 @@ class CreateSecurityGroupRuleResponse: @dataclass class CreateServerRequest: - zone: Optional[Zone] - """ - Zone to target. If none is passed will use default zone from the config. - """ - commercial_type: str """ Define the Instance commercial type (i.e. GP1-S). @@ -1798,6 +1793,11 @@ class CreateServerRequest: Instance image ID or label. """ + zone: Optional[Zone] + """ + Zone to target. If none is passed will use default zone from the config. + """ + name: Optional[str] """ Instance name. @@ -1813,14 +1813,14 @@ class CreateServerRequest: If true, configure the Instance so it uses the new routed IP mode. """ - enable_ipv6: bool + volumes: Optional[Dict[str, VolumeServerTemplate]] """ - True if IPv6 is enabled on the server. + Volumes attached to the server. """ - volumes: Optional[Dict[str, VolumeServerTemplate]] + enable_ipv6: Optional[bool] """ - Volumes attached to the server. + True if IPv6 is enabled on the server (deprecated and always `False` when `routed_ip_enabled` is `True`). """ public_ip: Optional[str] @@ -2774,6 +2774,11 @@ class ListServersRequest: List Instances that are not attached to a public IP. """ + with_ip: Optional[str] + """ + List Instances by IP (both private_ip and public_ip are supported). + """ + commercial_type: Optional[str] """ List Instances of this commercial type. diff --git a/scaleway/scaleway/instance/v1/types_private.py b/scaleway/scaleway/instance/v1/types_private.py index 9acfb25e0..da16440bc 100644 --- a/scaleway/scaleway/instance/v1/types_private.py +++ b/scaleway/scaleway/instance/v1/types_private.py @@ -164,10 +164,6 @@ class _SetServerRequest: """ True if a dynamic IPv4 is required. """ - enable_ipv6: bool - """ - True if IPv6 is enabled. - """ hostname: str """ Instance host name. @@ -196,6 +192,10 @@ class _SetServerRequest: """ True to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). """ + enable_ipv6: Optional[bool] + """ + True if IPv6 is enabled (deprecated and always `False` when `routed_ip_enabled` is `True`). + """ image: Optional[Image] """ Provide information on the Instance image. @@ -206,11 +206,11 @@ class _SetServerRequest: """ private_ip: Optional[str] """ - Instance private IP address. 
+ Instance private IP address (deprecated and always `null` when `routed_ip_enabled` is `True`). """ public_ip: Optional[ServerIp] """ - Information about the public IP. + Information about the public IP (deprecated in favor of `public_ips`). """ public_ips: Optional[List[ServerIp]] """ @@ -234,7 +234,7 @@ class _SetServerRequest: """ ipv6: Optional[ServerIpv6] """ - Instance IPv6 address. + Instance IPv6 address (deprecated when `routed_ip_enabled` is `True`). """ bootscript: Optional[Bootscript] """ From d08fbab4f5c9a4d00d9f72d55711cc233917c2db Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Wed, 17 Apr 2024 15:24:01 +0200 Subject: [PATCH 22/25] feat(cockpit): add v1 api (#495) --- .../scaleway_async/cockpit/v1/__init__.py | 121 ++ .../scaleway_async/cockpit/v1/api.py | 1402 +++++++++++++++++ .../scaleway_async/cockpit/v1/marshalling.py | 730 +++++++++ .../scaleway_async/cockpit/v1/types.py | 1135 +++++++++++++ scaleway/scaleway/cockpit/v1/__init__.py | 121 ++ scaleway/scaleway/cockpit/v1/api.py | 1402 +++++++++++++++++ scaleway/scaleway/cockpit/v1/marshalling.py | 730 +++++++++ scaleway/scaleway/cockpit/v1/types.py | 1135 +++++++++++++ 8 files changed, 6776 insertions(+) create mode 100644 scaleway-async/scaleway_async/cockpit/v1/__init__.py create mode 100644 scaleway-async/scaleway_async/cockpit/v1/api.py create mode 100644 scaleway-async/scaleway_async/cockpit/v1/marshalling.py create mode 100644 scaleway-async/scaleway_async/cockpit/v1/types.py create mode 100644 scaleway/scaleway/cockpit/v1/__init__.py create mode 100644 scaleway/scaleway/cockpit/v1/api.py create mode 100644 scaleway/scaleway/cockpit/v1/marshalling.py create mode 100644 scaleway/scaleway/cockpit/v1/types.py diff --git a/scaleway-async/scaleway_async/cockpit/v1/__init__.py b/scaleway-async/scaleway_async/cockpit/v1/__init__.py new file mode 100644 index 000000000..bb88a5303 --- /dev/null +++ b/scaleway-async/scaleway_async/cockpit/v1/__init__.py @@ -0,0 +1,121 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+from .types import DataSourceOrigin +from .types import DataSourceType +from .types import GrafanaUserRole +from .types import ListDataSourcesRequestOrderBy +from .types import ListGrafanaUsersRequestOrderBy +from .types import ListPlansRequestOrderBy +from .types import ListTokensRequestOrderBy +from .types import PlanName +from .types import TokenScope +from .types import UsageUnit +from .types import ContactPointEmail +from .types import ContactPoint +from .types import DataSource +from .types import GrafanaProductDashboard +from .types import GrafanaUser +from .types import Plan +from .types import Token +from .types import Usage +from .types import AlertManager +from .types import GlobalApiCreateGrafanaUserRequest +from .types import GlobalApiDeleteGrafanaUserRequest +from .types import GlobalApiGetCurrentPlanRequest +from .types import GlobalApiGetGrafanaProductDashboardRequest +from .types import GlobalApiGetGrafanaRequest +from .types import GlobalApiListGrafanaProductDashboardsRequest +from .types import GlobalApiListGrafanaUsersRequest +from .types import GlobalApiListPlansRequest +from .types import GlobalApiResetGrafanaUserPasswordRequest +from .types import GlobalApiSelectPlanRequest +from .types import GlobalApiSyncGrafanaDataSourcesRequest +from .types import Grafana +from .types import ListContactPointsResponse +from .types import ListDataSourcesResponse +from .types import ListGrafanaProductDashboardsResponse +from .types import ListGrafanaUsersResponse +from .types import ListPlansResponse +from .types import ListTokensResponse +from .types import RegionalApiCreateContactPointRequest +from .types import RegionalApiCreateDataSourceRequest +from .types import RegionalApiCreateTokenRequest +from .types import RegionalApiDeleteContactPointRequest +from .types import RegionalApiDeleteDataSourceRequest +from .types import RegionalApiDeleteTokenRequest +from .types import RegionalApiDisableAlertManagerRequest +from .types import RegionalApiDisableManagedAlertsRequest +from .types import RegionalApiEnableAlertManagerRequest +from .types import RegionalApiEnableManagedAlertsRequest +from .types import RegionalApiGetAlertManagerRequest +from .types import RegionalApiGetDataSourceRequest +from .types import RegionalApiGetTokenRequest +from .types import RegionalApiGetUsageOverviewRequest +from .types import RegionalApiListContactPointsRequest +from .types import RegionalApiListDataSourcesRequest +from .types import RegionalApiListTokensRequest +from .types import RegionalApiTriggerTestAlertRequest +from .types import UsageOverview +from .api import CockpitV1GlobalAPI +from .api import CockpitV1RegionalAPI + +__all__ = [ + "DataSourceOrigin", + "DataSourceType", + "GrafanaUserRole", + "ListDataSourcesRequestOrderBy", + "ListGrafanaUsersRequestOrderBy", + "ListPlansRequestOrderBy", + "ListTokensRequestOrderBy", + "PlanName", + "TokenScope", + "UsageUnit", + "ContactPointEmail", + "ContactPoint", + "DataSource", + "GrafanaProductDashboard", + "GrafanaUser", + "Plan", + "Token", + "Usage", + "AlertManager", + "GlobalApiCreateGrafanaUserRequest", + "GlobalApiDeleteGrafanaUserRequest", + "GlobalApiGetCurrentPlanRequest", + "GlobalApiGetGrafanaProductDashboardRequest", + "GlobalApiGetGrafanaRequest", + "GlobalApiListGrafanaProductDashboardsRequest", + "GlobalApiListGrafanaUsersRequest", + "GlobalApiListPlansRequest", + "GlobalApiResetGrafanaUserPasswordRequest", + "GlobalApiSelectPlanRequest", + "GlobalApiSyncGrafanaDataSourcesRequest", + "Grafana", + "ListContactPointsResponse", + 
"ListDataSourcesResponse", + "ListGrafanaProductDashboardsResponse", + "ListGrafanaUsersResponse", + "ListPlansResponse", + "ListTokensResponse", + "RegionalApiCreateContactPointRequest", + "RegionalApiCreateDataSourceRequest", + "RegionalApiCreateTokenRequest", + "RegionalApiDeleteContactPointRequest", + "RegionalApiDeleteDataSourceRequest", + "RegionalApiDeleteTokenRequest", + "RegionalApiDisableAlertManagerRequest", + "RegionalApiDisableManagedAlertsRequest", + "RegionalApiEnableAlertManagerRequest", + "RegionalApiEnableManagedAlertsRequest", + "RegionalApiGetAlertManagerRequest", + "RegionalApiGetDataSourceRequest", + "RegionalApiGetTokenRequest", + "RegionalApiGetUsageOverviewRequest", + "RegionalApiListContactPointsRequest", + "RegionalApiListDataSourcesRequest", + "RegionalApiListTokensRequest", + "RegionalApiTriggerTestAlertRequest", + "UsageOverview", + "CockpitV1GlobalAPI", + "CockpitV1RegionalAPI", +] diff --git a/scaleway-async/scaleway_async/cockpit/v1/api.py b/scaleway-async/scaleway_async/cockpit/v1/api.py new file mode 100644 index 000000000..8f289c7e0 --- /dev/null +++ b/scaleway-async/scaleway_async/cockpit/v1/api.py @@ -0,0 +1,1402 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. + +from typing import List, Optional + +from scaleway_core.api import API +from scaleway_core.bridge import ( + Region, +) +from scaleway_core.utils import ( + validate_path_param, + fetch_all_pages_async, +) +from .types import ( + DataSourceOrigin, + DataSourceType, + GrafanaUserRole, + ListDataSourcesRequestOrderBy, + ListGrafanaUsersRequestOrderBy, + ListPlansRequestOrderBy, + ListTokensRequestOrderBy, + PlanName, + TokenScope, + AlertManager, + ContactPoint, + ContactPointEmail, + DataSource, + GlobalApiCreateGrafanaUserRequest, + GlobalApiResetGrafanaUserPasswordRequest, + GlobalApiSelectPlanRequest, + GlobalApiSyncGrafanaDataSourcesRequest, + Grafana, + GrafanaProductDashboard, + GrafanaUser, + ListContactPointsResponse, + ListDataSourcesResponse, + ListGrafanaProductDashboardsResponse, + ListGrafanaUsersResponse, + ListPlansResponse, + ListTokensResponse, + Plan, + RegionalApiCreateContactPointRequest, + RegionalApiCreateDataSourceRequest, + RegionalApiCreateTokenRequest, + RegionalApiDeleteContactPointRequest, + RegionalApiDisableAlertManagerRequest, + RegionalApiDisableManagedAlertsRequest, + RegionalApiEnableAlertManagerRequest, + RegionalApiEnableManagedAlertsRequest, + RegionalApiTriggerTestAlertRequest, + Token, + UsageOverview, +) +from .marshalling import ( + unmarshal_ContactPoint, + unmarshal_DataSource, + unmarshal_GrafanaProductDashboard, + unmarshal_GrafanaUser, + unmarshal_Plan, + unmarshal_Token, + unmarshal_AlertManager, + unmarshal_Grafana, + unmarshal_ListContactPointsResponse, + unmarshal_ListDataSourcesResponse, + unmarshal_ListGrafanaProductDashboardsResponse, + unmarshal_ListGrafanaUsersResponse, + unmarshal_ListPlansResponse, + unmarshal_ListTokensResponse, + unmarshal_UsageOverview, + marshal_GlobalApiCreateGrafanaUserRequest, + marshal_GlobalApiResetGrafanaUserPasswordRequest, + marshal_GlobalApiSelectPlanRequest, + marshal_GlobalApiSyncGrafanaDataSourcesRequest, + marshal_RegionalApiCreateContactPointRequest, + marshal_RegionalApiCreateDataSourceRequest, + marshal_RegionalApiCreateTokenRequest, + marshal_RegionalApiDeleteContactPointRequest, + marshal_RegionalApiDisableAlertManagerRequest, + marshal_RegionalApiDisableManagedAlertsRequest, + 
marshal_RegionalApiEnableAlertManagerRequest, + marshal_RegionalApiEnableManagedAlertsRequest, + marshal_RegionalApiTriggerTestAlertRequest, +) + + +class CockpitV1GlobalAPI(API): + """ + The Cockpit Global API allows you to manage your Cockpit's Grafana and plans. + """ + + async def get_grafana( + self, + *, + project_id: Optional[str] = None, + ) -> Grafana: + """ + Get your Cockpit's Grafana. + Retrieve information on your Cockpit's Grafana, specified by the ID of the Project the Cockpit belongs to. + The output returned displays the URL to access your Cockpit's Grafana. + :param project_id: ID of the Project. + :return: :class:`Grafana ` + + Usage: + :: + + result = await api.get_grafana() + """ + + res = self._request( + "GET", + "/cockpit/v1/grafana", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_Grafana(res.json()) + + async def sync_grafana_data_sources( + self, + *, + project_id: Optional[str] = None, + ) -> None: + """ + Synchronize Grafana data sources. + Trigger the synchronization of all your data sources and the alert manager in the relevant regions. The alert manager will only be synchronized if you have enabled it. + :param project_id: ID of the Project to target. + + Usage: + :: + + result = await api.sync_grafana_data_sources() + """ + + res = self._request( + "POST", + "/cockpit/v1/grafana/sync-data-sources", + body=marshal_GlobalApiSyncGrafanaDataSourcesRequest( + GlobalApiSyncGrafanaDataSourcesRequest( + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def create_grafana_user( + self, + *, + login: str, + project_id: Optional[str] = None, + role: Optional[GrafanaUserRole] = None, + ) -> GrafanaUser: + """ + Create a Grafana user. + Create a Grafana user to connect to your Cockpit's Grafana. Upon creation, your user password displays only once, so make sure that you save it. + Each Grafana user is associated with a role: viewer or editor. A viewer can only view dashboards, whereas an editor can create and edit dashboards. Note that the `admin` username is not available for creation. + :param login: Username of the Grafana user. Note that the `admin` username is not available for creation. + :param project_id: ID of the Project in which to create the Grafana user. + :param role: Role assigned to the Grafana user. + :return: :class:`GrafanaUser ` + + Usage: + :: + + result = await api.create_grafana_user( + login="example", + ) + """ + + res = self._request( + "POST", + "/cockpit/v1/grafana/users", + body=marshal_GlobalApiCreateGrafanaUserRequest( + GlobalApiCreateGrafanaUserRequest( + login=login, + project_id=project_id, + role=role, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_GrafanaUser(res.json()) + + async def list_grafana_users( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListGrafanaUsersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListGrafanaUsersResponse: + """ + List Grafana users. + List all Grafana users created in your Cockpit's Grafana. By default, the Grafana users returned in the list are ordered in ascending order. + :param page: Page number. + :param page_size: Page size. + :param order_by: Order of the Grafana users. + :param project_id: ID of the Project to target. 
+ :return: :class:`ListGrafanaUsersResponse ` + + Usage: + :: + + result = await api.list_grafana_users() + """ + + res = self._request( + "GET", + "/cockpit/v1/grafana/users", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListGrafanaUsersResponse(res.json()) + + async def list_grafana_users_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListGrafanaUsersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[GrafanaUser]: + """ + List Grafana users. + List all Grafana users created in your Cockpit's Grafana. By default, the Grafana users returned in the list are ordered in ascending order. + :param page: Page number. + :param page_size: Page size. + :param order_by: Order of the Grafana users. + :param project_id: ID of the Project to target. + :return: :class:`List[GrafanaUser] ` + + Usage: + :: + + result = await api.list_grafana_users_all() + """ + + return await fetch_all_pages_async( + type=ListGrafanaUsersResponse, + key="grafana_users", + fetcher=self.list_grafana_users, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + async def delete_grafana_user( + self, + *, + project_id: Optional[str] = None, + grafana_user_id: int, + ) -> None: + """ + Delete a Grafana user. + Delete a Grafana user from your Cockpit's Grafana, specified by the ID of the Project the Cockpit belongs to, and the ID of the Grafana user. + :param project_id: ID of the Project to target. + :param grafana_user_id: ID of the Grafana user. + + Usage: + :: + + result = await api.delete_grafana_user( + grafana_user_id=1, + ) + """ + + param_grafana_user_id = validate_path_param("grafana_user_id", grafana_user_id) + + res = self._request( + "DELETE", + f"/cockpit/v1/grafana/users/{param_grafana_user_id}", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + + async def reset_grafana_user_password( + self, + *, + project_id: Optional[str] = None, + grafana_user_id: int, + ) -> GrafanaUser: + """ + Reset a Grafana user password. + Reset the password of a Grafana user, specified by the ID of the Project the Cockpit belongs to, and the ID of the Grafana user. + A new password regenerates and only displays once. Make sure that you save it. + :param project_id: ID of the Project to target. + :param grafana_user_id: ID of the Grafana user. + :return: :class:`GrafanaUser ` + + Usage: + :: + + result = await api.reset_grafana_user_password( + grafana_user_id=1, + ) + """ + + param_grafana_user_id = validate_path_param("grafana_user_id", grafana_user_id) + + res = self._request( + "POST", + f"/cockpit/v1/grafana/users/{param_grafana_user_id}/reset-password", + body=marshal_GlobalApiResetGrafanaUserPasswordRequest( + GlobalApiResetGrafanaUserPasswordRequest( + project_id=project_id, + grafana_user_id=grafana_user_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_GrafanaUser(res.json()) + + async def list_grafana_product_dashboards( + self, + *, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + tags: Optional[List[str]] = None, + ) -> ListGrafanaProductDashboardsResponse: + """ + List Scaleway resources dashboards. 
+ Retrieve a list of available dashboards in Grafana, for all Scaleway resources which are integrated with Cockpit. + :param project_id: ID of the Project to target. + :param page: Page number. + :param page_size: Page size. + :param tags: Tags to filter for. + :return: :class:`ListGrafanaProductDashboardsResponse ` + + Usage: + :: + + result = await api.list_grafana_product_dashboards() + """ + + res = self._request( + "GET", + "/cockpit/v1/grafana/product-dashboards", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "tags": tags, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListGrafanaProductDashboardsResponse(res.json()) + + async def list_grafana_product_dashboards_all( + self, + *, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + tags: Optional[List[str]] = None, + ) -> List[GrafanaProductDashboard]: + """ + List Scaleway resources dashboards. + Retrieve a list of available dashboards in Grafana, for all Scaleway resources which are integrated with Cockpit. + :param project_id: ID of the Project to target. + :param page: Page number. + :param page_size: Page size. + :param tags: Tags to filter for. + :return: :class:`List[GrafanaProductDashboard] ` + + Usage: + :: + + result = await api.list_grafana_product_dashboards_all() + """ + + return await fetch_all_pages_async( + type=ListGrafanaProductDashboardsResponse, + key="dashboards", + fetcher=self.list_grafana_product_dashboards, + args={ + "project_id": project_id, + "page": page, + "page_size": page_size, + "tags": tags, + }, + ) + + async def get_grafana_product_dashboard( + self, + *, + project_id: Optional[str] = None, + dashboard_name: str, + ) -> GrafanaProductDashboard: + """ + Get Scaleway resource dashboard. + Retrieve information about the dashboard of a Scaleway resource in Grafana, specified by the ID of the Project the Cockpit belongs to, and the name of the dashboard. + :param project_id: ID of the Project the dashboard belongs to. + :param dashboard_name: Name of the dashboard. + :return: :class:`GrafanaProductDashboard ` + + Usage: + :: + + result = await api.get_grafana_product_dashboard( + dashboard_name="example", + ) + """ + + param_dashboard_name = validate_path_param("dashboard_name", dashboard_name) + + res = self._request( + "GET", + f"/cockpit/v1/grafana/product-dashboards/{param_dashboard_name}", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GrafanaProductDashboard(res.json()) + + async def list_plans( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListPlansRequestOrderBy] = None, + ) -> ListPlansResponse: + """ + List plan types. + Retrieve a list of available pricing plan types. + :param page: Page number. + :param page_size: Page size. + :param order_by: + :return: :class:`ListPlansResponse ` + + Usage: + :: + + result = await api.list_plans() + """ + + res = self._request( + "GET", + "/cockpit/v1/plans", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListPlansResponse(res.json()) + + async def list_plans_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListPlansRequestOrderBy] = None, + ) -> List[Plan]: + """ + List plan types. 
+ Retrieve a list of available pricing plan types. + :param page: Page number. + :param page_size: Page size. + :param order_by: + :return: :class:`List[Plan] ` + + Usage: + :: + + result = await api.list_plans_all() + """ + + return await fetch_all_pages_async( + type=ListPlansResponse, + key="plans", + fetcher=self.list_plans, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + }, + ) + + async def select_plan( + self, + *, + project_id: Optional[str] = None, + plan_name: Optional[PlanName] = None, + ) -> Plan: + """ + Apply a pricing plan. + Apply a pricing plan on a given Project. You must specify the ID of the pricing plan type. Note that you will be billed for the plan you apply. + :param project_id: ID of the Project. + :param plan_name: Name of the pricing plan. + :return: :class:`Plan ` + + Usage: + :: + + result = await api.select_plan() + """ + + res = self._request( + "PATCH", + "/cockpit/v1/plans", + body=marshal_GlobalApiSelectPlanRequest( + GlobalApiSelectPlanRequest( + project_id=project_id, + plan_name=plan_name, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Plan(res.json()) + + async def get_current_plan( + self, + *, + project_id: Optional[str] = None, + ) -> Plan: + """ + Get current plan. + Retrieve a pricing plan for the given Project, specified by the ID of the Project. + :param project_id: ID of the Project. + :return: :class:`Plan ` + + Usage: + :: + + result = await api.get_current_plan() + """ + + res = self._request( + "GET", + "/cockpit/v1/current-plan", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_Plan(res.json()) + + +class CockpitV1RegionalAPI(API): + """ + The Cockpit Regional API allows you to create data sources and tokens to store and query data types such as metrics, logs, and traces. You can also push your data into Cockpit, and send alerts to your contact points when your resources may require your attention, using the regional Alert manager. + """ + + async def create_data_source( + self, + *, + name: str, + region: Optional[Region] = None, + project_id: Optional[str] = None, + type_: Optional[DataSourceType] = None, + ) -> DataSource: + """ + Create a data source. + You must specify the data source type upon creation. Available data source types include: + - metrics + - logs + - traces + The name of the data source will then be used as reference to name the associated Grafana data source. + :param name: Data source name. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project the data source belongs to. + :param type_: Data source type. + :return: :class:`DataSource ` + + Usage: + :: + + result = await api.create_data_source( + name="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/data-sources", + body=marshal_RegionalApiCreateDataSourceRequest( + RegionalApiCreateDataSourceRequest( + name=name, + region=region, + project_id=project_id, + type_=type_, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_DataSource(res.json()) + + async def get_data_source( + self, + *, + data_source_id: str, + region: Optional[Region] = None, + ) -> DataSource: + """ + Get a data source. + Retrieve information about a given data source, specified by the data source ID. 
The data source's information such as its name, type, URL, origin, and retention period, is returned. + :param data_source_id: ID of the relevant data source. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`DataSource ` + + Usage: + :: + + result = await api.get_data_source( + data_source_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_data_source_id = validate_path_param("data_source_id", data_source_id) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/data-sources/{param_data_source_id}", + ) + + self._throw_on_error(res) + return unmarshal_DataSource(res.json()) + + async def delete_data_source( + self, + *, + data_source_id: str, + region: Optional[Region] = None, + ) -> None: + """ + Delete a data source. + Delete a given data source, specified by the data source ID. Note that deleting a data source is irreversible, and cannot be undone. + :param data_source_id: ID of the data source to delete. + :param region: Region to target. If none is passed will use default region from the config. + + Usage: + :: + + result = await api.delete_data_source( + data_source_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_data_source_id = validate_path_param("data_source_id", data_source_id) + + res = self._request( + "DELETE", + f"/cockpit/v1/regions/{param_region}/data-sources/{param_data_source_id}", + ) + + self._throw_on_error(res) + + async def list_data_sources( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListDataSourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + origin: Optional[DataSourceOrigin] = None, + types: Optional[List[DataSourceType]] = None, + ) -> ListDataSourcesResponse: + """ + List data sources. + Retrieve the list of data sources available in the specified region. By default, the data sources returned in the list are ordered by creation date, in ascending order. + You can list data sources by Project, type and origin. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of data sources to return per page. + :param order_by: Sort order for data sources in the response. + :param project_id: Project ID to filter for, only data sources from this Project will be returned. + :param origin: Origin to filter for, only data sources with matching origin will be returned. + :param types: Types to filter for, only data sources with matching types will be returned. 
+ :return: :class:`ListDataSourcesResponse ` + + Usage: + :: + + result = await api.list_data_sources() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/data-sources", + params={ + "order_by": order_by, + "origin": origin, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "types": types, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListDataSourcesResponse(res.json()) + + async def list_data_sources_all( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListDataSourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + origin: Optional[DataSourceOrigin] = None, + types: Optional[List[DataSourceType]] = None, + ) -> List[DataSource]: + """ + List data sources. + Retrieve the list of data sources available in the specified region. By default, the data sources returned in the list are ordered by creation date, in ascending order. + You can list data sources by Project, type and origin. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of data sources to return per page. + :param order_by: Sort order for data sources in the response. + :param project_id: Project ID to filter for, only data sources from this Project will be returned. + :param origin: Origin to filter for, only data sources with matching origin will be returned. + :param types: Types to filter for, only data sources with matching types will be returned. + :return: :class:`List[DataSource] ` + + Usage: + :: + + result = await api.list_data_sources_all() + """ + + return await fetch_all_pages_async( + type=ListDataSourcesResponse, + key="data_sources", + fetcher=self.list_data_sources, + args={ + "region": region, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + "origin": origin, + "types": types, + }, + ) + + async def get_usage_overview( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + interval: Optional[str] = None, + ) -> UsageOverview: + """ + Get data source usage overview. + Retrieve the data source usage overview per type for the specified Project. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: + :param interval: + :return: :class:`UsageOverview ` + + Usage: + :: + + result = await api.get_usage_overview() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/usage-overview", + params={ + "interval": interval, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_UsageOverview(res.json()) + + async def create_token( + self, + *, + name: str, + region: Optional[Region] = None, + project_id: Optional[str] = None, + token_scopes: Optional[List[TokenScope]] = None, + ) -> Token: + """ + Create a token. + Give your token the relevant scopes to ensure it has the right permissions to interact with your data sources and the Alert manager. Make sure that you create your token in the same regions as the data sources you want to use it for. 
+ Upon creation, your token's secret key display only once. Make sure that you save it. + :param name: Name of the token. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project the token belongs to. + :param token_scopes: Token permission scopes. + :return: :class:`Token ` + + Usage: + :: + + result = await api.create_token( + name="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/tokens", + body=marshal_RegionalApiCreateTokenRequest( + RegionalApiCreateTokenRequest( + name=name, + region=region, + project_id=project_id, + token_scopes=token_scopes, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Token(res.json()) + + async def list_tokens( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListTokensRequestOrderBy] = None, + project_id: Optional[str] = None, + token_scopes: Optional[List[TokenScope]] = None, + ) -> ListTokensResponse: + """ + List tokens. + Retrieve a list of all tokens in the specified region. By default, tokens returned in the list are ordered by creation date, in ascending order. + You can filter tokens by Project ID and token scopes. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of tokens to return per page. + :param order_by: Order in which to return results. + :param project_id: ID of the Project the tokens belong to. + :param token_scopes: Token scopes to filter for. + :return: :class:`ListTokensResponse ` + + Usage: + :: + + result = await api.list_tokens() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/tokens", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "token_scopes": token_scopes, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListTokensResponse(res.json()) + + async def list_tokens_all( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListTokensRequestOrderBy] = None, + project_id: Optional[str] = None, + token_scopes: Optional[List[TokenScope]] = None, + ) -> List[Token]: + """ + List tokens. + Retrieve a list of all tokens in the specified region. By default, tokens returned in the list are ordered by creation date, in ascending order. + You can filter tokens by Project ID and token scopes. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of tokens to return per page. + :param order_by: Order in which to return results. + :param project_id: ID of the Project the tokens belong to. + :param token_scopes: Token scopes to filter for. 
+ :return: :class:`List[Token] ` + + Usage: + :: + + result = await api.list_tokens_all() + """ + + return await fetch_all_pages_async( + type=ListTokensResponse, + key="tokens", + fetcher=self.list_tokens, + args={ + "region": region, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + "token_scopes": token_scopes, + }, + ) + + async def get_token( + self, + *, + token_id: str, + region: Optional[Region] = None, + ) -> Token: + """ + Get a token. + Retrieve information about a given token, specified by the token ID. The token's information such as its scopes, is returned. + :param token_id: Token ID. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Token ` + + Usage: + :: + + result = await api.get_token( + token_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_token_id = validate_path_param("token_id", token_id) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/tokens/{param_token_id}", + ) + + self._throw_on_error(res) + return unmarshal_Token(res.json()) + + async def delete_token( + self, + *, + token_id: str, + region: Optional[Region] = None, + ) -> None: + """ + Delete a token. + Delete a given token, specified by the token ID. Deleting a token is irreversible and cannot be undone. + :param token_id: ID of the token to delete. + :param region: Region to target. If none is passed will use default region from the config. + + Usage: + :: + + result = await api.delete_token( + token_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_token_id = validate_path_param("token_id", token_id) + + res = self._request( + "DELETE", + f"/cockpit/v1/regions/{param_region}/tokens/{param_token_id}", + ) + + self._throw_on_error(res) + + async def get_alert_manager( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Get the Alert manager. + Retrieve information about the Alert manager which is unique per Project and region. By default the Alert manager is disabled. + The output returned displays a URL to access the Alert manager, and whether the Alert manager and managed alerts are enabled. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: Project ID of the requested Alert manager. + :return: :class:`AlertManager ` + + Usage: + :: + + result = await api.get_alert_manager() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/alert-manager", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + async def enable_alert_manager( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Enable the Alert manager. + Enabling the Alert manager allows you to enable managed alerts and create contact points in the specified Project and region, to be notified when your Scaleway resources may require your attention. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to enable the Alert manager in. 
+ :return: :class:`AlertManager ` + + Usage: + :: + + result = await api.enable_alert_manager() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/enable", + body=marshal_RegionalApiEnableAlertManagerRequest( + RegionalApiEnableAlertManagerRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + async def disable_alert_manager( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Disable the Alert manager. + Disabling the Alert manager deletes the contact points you have created and disables managed alerts in the specified Project and region. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to disable the Alert manager in. + :return: :class:`AlertManager ` + + Usage: + :: + + result = await api.disable_alert_manager() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/disable", + body=marshal_RegionalApiDisableAlertManagerRequest( + RegionalApiDisableAlertManagerRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + async def create_contact_point( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + email: Optional[ContactPointEmail] = None, + ) -> ContactPoint: + """ + Create a contact point. + Contact points are email addresses associated with the default receiver, that the Alert manager sends alerts to. + The source of the alerts are data sources within the same Project and region as the Alert manager. + If you need to receive alerts for other receivers, you can create additional contact points and receivers in Grafana. Make sure that you select the Scaleway Alert manager. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to create the contact point in. + :param email: Email address of the contact point to create. + One-Of ('configuration'): at most one of 'email' could be set. + :return: :class:`ContactPoint ` + + Usage: + :: + + result = await api.create_contact_point() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/contact-points", + body=marshal_RegionalApiCreateContactPointRequest( + RegionalApiCreateContactPointRequest( + region=region, + project_id=project_id, + email=email, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_ContactPoint(res.json()) + + async def list_contact_points( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + project_id: Optional[str] = None, + ) -> ListContactPointsResponse: + """ + List contact points. + Retrieve a list of contact points for the specified Project. The response lists all contact points and receivers created in Grafana or via the API. + :param region: Region to target. If none is passed will use default region from the config. 
+ :param page: Page number to return, from the paginated results. + :param page_size: Total count of contact points to return per page. + :param project_id: ID of the Project containing the contact points to list. + :return: :class:`ListContactPointsResponse ` + + Usage: + :: + + result = await api.list_contact_points() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/alert-manager/contact-points", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListContactPointsResponse(res.json()) + + async def list_contact_points_all( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + project_id: Optional[str] = None, + ) -> List[ContactPoint]: + """ + List contact points. + Retrieve a list of contact points for the specified Project. The response lists all contact points and receivers created in Grafana or via the API. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Total count of contact points to return per page. + :param project_id: ID of the Project containing the contact points to list. + :return: :class:`List[ContactPoint] ` + + Usage: + :: + + result = await api.list_contact_points_all() + """ + + return await fetch_all_pages_async( + type=ListContactPointsResponse, + key="contact_points", + fetcher=self.list_contact_points, + args={ + "region": region, + "page": page, + "page_size": page_size, + "project_id": project_id, + }, + ) + + async def delete_contact_point( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + email: Optional[ContactPointEmail] = None, + ) -> None: + """ + Delete a contact point. + Delete a contact point associated with the default receiver. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project containing the contact point to delete. + :param email: Email address of the contact point to delete. + One-Of ('configuration'): at most one of 'email' could be set. + + Usage: + :: + + result = await api.delete_contact_point() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/contact-points/delete", + body=marshal_RegionalApiDeleteContactPointRequest( + RegionalApiDeleteContactPointRequest( + region=region, + project_id=project_id, + email=email, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + async def enable_managed_alerts( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Enable managed alerts. + Enable the sending of managed alerts for the specified Project. Managed alerts are predefined alerts that apply to Scaleway recources integrated with Cockpit by default. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project. 
+ :return: :class:`AlertManager ` + + Usage: + :: + + result = await api.enable_managed_alerts() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/managed-alerts/enable", + body=marshal_RegionalApiEnableManagedAlertsRequest( + RegionalApiEnableManagedAlertsRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + async def disable_managed_alerts( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Disable managed alerts. + Disable the sending of managed alerts for the specified Project. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project. + :return: :class:`AlertManager ` + + Usage: + :: + + result = await api.disable_managed_alerts() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/managed-alerts/disable", + body=marshal_RegionalApiDisableManagedAlertsRequest( + RegionalApiDisableManagedAlertsRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + async def trigger_test_alert( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> None: + """ + Trigger a test alert. + Send a test alert to the Alert manager to make sure your contact points get notified. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project. + + Usage: + :: + + result = await api.trigger_test_alert() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/trigger-test-alert", + body=marshal_RegionalApiTriggerTestAlertRequest( + RegionalApiTriggerTestAlertRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) diff --git a/scaleway-async/scaleway_async/cockpit/v1/marshalling.py b/scaleway-async/scaleway_async/cockpit/v1/marshalling.py new file mode 100644 index 000000000..12bc2dd4b --- /dev/null +++ b/scaleway-async/scaleway_async/cockpit/v1/marshalling.py @@ -0,0 +1,730 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+ +from typing import Any, Dict +from dateutil import parser + +from scaleway_core.profile import ProfileDefaults +from scaleway_core.utils import ( + OneOfPossibility, + resolve_one_of, +) +from .types import ( + TokenScope, + ContactPointEmail, + ContactPoint, + DataSource, + GrafanaProductDashboard, + GrafanaUser, + Plan, + Token, + AlertManager, + Grafana, + ListContactPointsResponse, + ListDataSourcesResponse, + ListGrafanaProductDashboardsResponse, + ListGrafanaUsersResponse, + ListPlansResponse, + ListTokensResponse, + Usage, + UsageOverview, + GlobalApiCreateGrafanaUserRequest, + GlobalApiResetGrafanaUserPasswordRequest, + GlobalApiSelectPlanRequest, + GlobalApiSyncGrafanaDataSourcesRequest, + RegionalApiCreateContactPointRequest, + RegionalApiCreateDataSourceRequest, + RegionalApiCreateTokenRequest, + RegionalApiDeleteContactPointRequest, + RegionalApiDisableAlertManagerRequest, + RegionalApiDisableManagedAlertsRequest, + RegionalApiEnableAlertManagerRequest, + RegionalApiEnableManagedAlertsRequest, + RegionalApiTriggerTestAlertRequest, +) + + +def unmarshal_ContactPointEmail(data: Any) -> ContactPointEmail: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ContactPointEmail' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("to", None) + if field is not None: + args["to"] = field + + return ContactPointEmail(**args) + + +def unmarshal_ContactPoint(data: Any) -> ContactPoint: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ContactPoint' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("email", None) + if field is not None: + args["email"] = unmarshal_ContactPointEmail(field) + + return ContactPoint(**args) + + +def unmarshal_DataSource(data: Any) -> DataSource: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'DataSource' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("url", None) + if field is not None: + args["url"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("origin", None) + if field is not None: + args["origin"] = field + + field = data.get("synchronized_with_grafana", None) + if field is not None: + args["synchronized_with_grafana"] = field + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return DataSource(**args) + + +def unmarshal_GrafanaProductDashboard(data: Any) -> GrafanaProductDashboard: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GrafanaProductDashboard' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("title", None) + if field is not None: + args["title"] = field + + field = data.get("url", None) + if field is not None: + args["url"] = field + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + + field = data.get("variables", None) + if field is not None: + args["variables"] = field + + return GrafanaProductDashboard(**args) + + +def unmarshal_GrafanaUser(data: Any) -> GrafanaUser: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GrafanaUser' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("role", None) + if field is not None: + args["role"] = field + + field = data.get("password", None) + if field is not None: + args["password"] = field + + return GrafanaUser(**args) + + +def unmarshal_Plan(data: Any) -> Plan: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Plan' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("sample_ingestion_price", None) + if field is not None: + args["sample_ingestion_price"] = field + + field = data.get("logs_ingestion_price", None) + if field is not None: + args["logs_ingestion_price"] = field + + field = data.get("traces_ingestion_price", None) + if field is not None: + args["traces_ingestion_price"] = field + + field = data.get("monthly_price", None) + if field is not None: + args["monthly_price"] = field + + field = data.get("retention_metrics_interval", None) + if field is not None: + args["retention_metrics_interval"] = field + + field = data.get("retention_logs_interval", None) + if field is not None: + args["retention_logs_interval"] = field + + field = data.get("retention_traces_interval", None) + if field is not None: + args["retention_traces_interval"] = field + + return Plan(**args) + + +def unmarshal_Token(data: Any) -> Token: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Token' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("scopes", None) + if field is not None: + args["scopes"] = [TokenScope(v) for v in field] if field is not None else None + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("secret_key", None) + if field is not None: + args["secret_key"] = field + + return Token(**args) + + +def unmarshal_AlertManager(data: Any) -> AlertManager: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'AlertManager' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("alert_manager_enabled", None) + if field is not None: + args["alert_manager_enabled"] = field + + field = data.get("managed_alerts_enabled", None) + if field is not None: + args["managed_alerts_enabled"] = field + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("alert_manager_url", None) + if field is not None: + args["alert_manager_url"] = field + + return AlertManager(**args) + + +def unmarshal_Grafana(data: Any) -> Grafana: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Grafana' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("grafana_url", None) + if field is not None: + args["grafana_url"] = field + + return Grafana(**args) + + +def unmarshal_ListContactPointsResponse(data: Any) -> ListContactPointsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListContactPointsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("contact_points", None) + if field is not None: + args["contact_points"] = ( + [unmarshal_ContactPoint(v) for v in field] if field is not None else None + ) + + field = data.get("has_additional_receivers", None) + if field is not None: + args["has_additional_receivers"] = field + + field = data.get("has_additional_contact_points", None) + if field is not None: + args["has_additional_contact_points"] = field + + return ListContactPointsResponse(**args) + + +def unmarshal_ListDataSourcesResponse(data: Any) -> ListDataSourcesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListDataSourcesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("data_sources", None) + if field is not None: + args["data_sources"] = ( + [unmarshal_DataSource(v) for v in field] if field is not None else None + ) + + return ListDataSourcesResponse(**args) + + +def unmarshal_ListGrafanaProductDashboardsResponse( + data: Any, +) -> ListGrafanaProductDashboardsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListGrafanaProductDashboardsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("dashboards", None) + if field is not None: + args["dashboards"] = ( + [unmarshal_GrafanaProductDashboard(v) for v in field] + if field is not None + else None + ) + + return ListGrafanaProductDashboardsResponse(**args) + + +def unmarshal_ListGrafanaUsersResponse(data: Any) -> ListGrafanaUsersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListGrafanaUsersResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("grafana_users", None) + if field is not None: + args["grafana_users"] = ( + [unmarshal_GrafanaUser(v) for v in field] if field is not None else None + ) + + return ListGrafanaUsersResponse(**args) + + +def unmarshal_ListPlansResponse(data: Any) -> ListPlansResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListPlansResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("plans", None) + if field is not None: + args["plans"] = ( + [unmarshal_Plan(v) for v in field] if field is not None else None + ) + + return ListPlansResponse(**args) + + +def unmarshal_ListTokensResponse(data: Any) -> ListTokensResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListTokensResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("tokens", None) + if field is not None: + args["tokens"] = ( + [unmarshal_Token(v) for v in field] if field is not None else None + ) + + return ListTokensResponse(**args) + + +def unmarshal_Usage(data: Any) -> Usage: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Usage' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("data_source_origin", None) + if field is not None: + args["data_source_origin"] = field + + field = data.get("data_source_type", None) + if field is not None: + args["data_source_type"] = field + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + + field = data.get("quantity_over_interval", None) + if field is not None: + args["quantity_over_interval"] = field + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("data_source_id", None) + if field is not None: + args["data_source_id"] = field + + field = data.get("interval", None) + if field is not None: + args["interval"] = field + + return Usage(**args) + + +def unmarshal_UsageOverview(data: Any) -> UsageOverview: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'UsageOverview' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("scaleway_metrics_usage", None) + if field is not None: + args["scaleway_metrics_usage"] = unmarshal_Usage(field) + + field = data.get("scaleway_logs_usage", None) + if field is not None: + args["scaleway_logs_usage"] = unmarshal_Usage(field) + + field = data.get("external_metrics_usage", None) + if field is not None: + args["external_metrics_usage"] = unmarshal_Usage(field) + + field = data.get("external_logs_usage", None) + if field is not None: + args["external_logs_usage"] = unmarshal_Usage(field) + + field = data.get("external_traces_usage", None) + if field is not None: + args["external_traces_usage"] = unmarshal_Usage(field) + + return UsageOverview(**args) + + +def marshal_GlobalApiCreateGrafanaUserRequest( + request: GlobalApiCreateGrafanaUserRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.login is not None: + output["login"] = request.login + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.role is not None: + output["role"] = str(request.role) + + return output + + +def marshal_GlobalApiResetGrafanaUserPasswordRequest( + request: GlobalApiResetGrafanaUserPasswordRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_GlobalApiSelectPlanRequest( + request: GlobalApiSelectPlanRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.plan_name is not None: + output["plan_name"] = str(request.plan_name) + + return output + + +def marshal_GlobalApiSyncGrafanaDataSourcesRequest( + request: GlobalApiSyncGrafanaDataSourcesRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_ContactPointEmail( + request: ContactPointEmail, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.to is not None: + output["to"] = request.to + + return output + + +def marshal_RegionalApiCreateContactPointRequest( + request: RegionalApiCreateContactPointRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + output.update( + resolve_one_of( + [ + OneOfPossibility("email", request.email), + ] + ), + ) + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiCreateDataSourceRequest( + request: RegionalApiCreateDataSourceRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.type_ is not None: + output["type"] = str(request.type_) + + return output + + +def marshal_RegionalApiCreateTokenRequest( + request: RegionalApiCreateTokenRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.project_id 
is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.token_scopes is not None: + output["token_scopes"] = [str(item) for item in request.token_scopes] + + return output + + +def marshal_RegionalApiDeleteContactPointRequest( + request: RegionalApiDeleteContactPointRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + output.update( + resolve_one_of( + [ + OneOfPossibility("email", request.email), + ] + ), + ) + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiDisableAlertManagerRequest( + request: RegionalApiDisableAlertManagerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiDisableManagedAlertsRequest( + request: RegionalApiDisableManagedAlertsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiEnableAlertManagerRequest( + request: RegionalApiEnableAlertManagerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiEnableManagedAlertsRequest( + request: RegionalApiEnableManagedAlertsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiTriggerTestAlertRequest( + request: RegionalApiTriggerTestAlertRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output diff --git a/scaleway-async/scaleway_async/cockpit/v1/types.py b/scaleway-async/scaleway_async/cockpit/v1/types.py new file mode 100644 index 000000000..e63357577 --- /dev/null +++ b/scaleway-async/scaleway_async/cockpit/v1/types.py @@ -0,0 +1,1135 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +from typing import List, Optional + +from scaleway_core.bridge import ( + Region, +) +from scaleway_core.utils import ( + StrEnumMeta, +) + + +class DataSourceOrigin(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_ORIGIN = "unknown_origin" + SCALEWAY = "scaleway" + EXTERNAL = "external" + + def __str__(self) -> str: + return str(self.value) + + +class DataSourceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + METRICS = "metrics" + LOGS = "logs" + TRACES = "traces" + + def __str__(self) -> str: + return str(self.value) + + +class GrafanaUserRole(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_ROLE = "unknown_role" + EDITOR = "editor" + VIEWER = "viewer" + + def __str__(self) -> str: + return str(self.value) + + +class ListDataSourcesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + TYPE_ASC = "type_asc" + TYPE_DESC = "type_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListGrafanaUsersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + LOGIN_ASC = "login_asc" + LOGIN_DESC = "login_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListPlansRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListTokensRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + + def __str__(self) -> str: + return str(self.value) + + +class PlanName(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_NAME = "unknown_name" + FREE = "free" + PREMIUM = "premium" + CUSTOM = "custom" + + def __str__(self) -> str: + return str(self.value) + + +class TokenScope(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_SCOPE = "unknown_scope" + READ_ONLY_METRICS = "read_only_metrics" + WRITE_ONLY_METRICS = "write_only_metrics" + FULL_ACCESS_METRICS_RULES = "full_access_metrics_rules" + READ_ONLY_LOGS = "read_only_logs" + WRITE_ONLY_LOGS = "write_only_logs" + FULL_ACCESS_LOGS_RULES = "full_access_logs_rules" + FULL_ACCESS_ALERT_MANAGER = "full_access_alert_manager" + READ_ONLY_TRACES = "read_only_traces" + WRITE_ONLY_TRACES = "write_only_traces" + + def __str__(self) -> str: + return str(self.value) + + +class UsageUnit(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_UNIT = "unknown_unit" + BYTES = "bytes" + SAMPLES = "samples" + + def __str__(self) -> str: + return str(self.value) + + +@dataclass +class ContactPointEmail: + to: str + + +@dataclass +class ContactPoint: + """ + Contact point. + """ + + region: Region + """ + Region to target. If none is passed will use default region from the config. + """ + + email: Optional[ContactPointEmail] + + +@dataclass +class DataSource: + """ + Data source. + """ + + id: str + """ + ID of the data source. + """ + + project_id: str + """ + ID of the Project the data source belongs to. + """ + + name: str + """ + Data source name. + """ + + url: str + """ + Data source URL. + """ + + type_: DataSourceType + """ + Data source type. + """ + + origin: DataSourceOrigin + """ + Data source origin. + """ + + synchronized_with_grafana: bool + """ + Indicates whether the data source is synchronized with Grafana. + """ + + region: Region + """ + Region of the data source. 
+ """ + + created_at: Optional[datetime] + """ + Date the data source was created. + """ + + updated_at: Optional[datetime] + """ + Date the data source was last updated. + """ + + +@dataclass +class GrafanaProductDashboard: + """ + Grafana dashboard. + """ + + name: str + """ + Dashboard name. + """ + + title: str + """ + Dashboard title. + """ + + url: str + """ + Dashboard URL. + """ + + tags: List[str] + """ + Dashboard tags. + """ + + variables: List[str] + """ + Dashboard variables. + """ + + +@dataclass +class GrafanaUser: + """ + Grafana user. + """ + + id: int + """ + ID of the Grafana user. + """ + + login: str + """ + Username of the Grafana user. + """ + + role: GrafanaUserRole + """ + Role assigned to the Grafana user. + """ + + password: Optional[str] + """ + Grafana user's password. + """ + + +@dataclass +class Plan: + """ + Type of pricing plan. + """ + + name: PlanName + """ + Name of a given pricing plan. + """ + + sample_ingestion_price: int + """ + Ingestion price in cents for 1 million samples. + """ + + logs_ingestion_price: int + """ + Ingestion price in cents for 1 GB of logs. + """ + + traces_ingestion_price: int + """ + Ingestion price in cents for 1 GB of traces. + """ + + monthly_price: int + """ + Retention price in euros per month. + """ + + retention_metrics_interval: Optional[str] + """ + Interval of time during which Scaleway's Cockpit keeps your metrics. + """ + + retention_logs_interval: Optional[str] + """ + Interval of time during which Scaleway's Cockpit keeps your logs. + """ + + retention_traces_interval: Optional[str] + """ + Interval of time during which Scaleway's Cockpit keeps your traces. + """ + + +@dataclass +class Token: + """ + Token. + """ + + id: str + """ + ID of the token. + """ + + project_id: str + """ + ID of the Project the token belongs to. + """ + + name: str + """ + Name of the token. + """ + + scopes: List[TokenScope] + """ + Token permission scopes. + """ + + region: Region + """ + Regions where the token is located. + """ + + created_at: Optional[datetime] + """ + Token creation date. + """ + + updated_at: Optional[datetime] + """ + Token last modification date. + """ + + secret_key: Optional[str] + """ + Token secret key. + """ + + +@dataclass +class Usage: + """ + Data source usage. + """ + + project_id: str + """ + ID of the Project the data source belongs to. + """ + + data_source_origin: DataSourceOrigin + """ + Origin of the data source. + """ + + data_source_type: DataSourceType + """ + Type of the data source. + """ + + unit: UsageUnit + """ + Unit of the data source usage. + """ + + quantity_over_interval: int + """ + Data source usage for the given interval. + """ + + region: Region + """ + Region of the data source usage. + """ + + data_source_id: Optional[str] + """ + ID of the data source. + """ + + interval: Optional[str] + """ + Interval for the data source usage. + """ + + +@dataclass +class AlertManager: + """ + Alert manager information. + """ + + alert_manager_enabled: bool + """ + The Alert manager is enabled. + """ + + managed_alerts_enabled: bool + """ + Managed alerts are enabled. + """ + + region: Region + """ + Regions where the Alert manager is enabled. + """ + + alert_manager_url: Optional[str] + """ + Alert manager URL. + """ + + +@dataclass +class GlobalApiCreateGrafanaUserRequest: + """ + Create a Grafana user. + """ + + login: str + """ + Username of the Grafana user. Note that the `admin` username is not available for creation. 
+ """ + + project_id: Optional[str] + """ + ID of the Project in which to create the Grafana user. + """ + + role: Optional[GrafanaUserRole] + """ + Role assigned to the Grafana user. + """ + + +@dataclass +class GlobalApiDeleteGrafanaUserRequest: + """ + Delete a Grafana user. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + grafana_user_id: int + """ + ID of the Grafana user. + """ + + +@dataclass +class GlobalApiGetCurrentPlanRequest: + """ + Retrieve a pricing plan for the given Project. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class GlobalApiGetGrafanaProductDashboardRequest: + """ + Retrieve a specific dashboard. + """ + + project_id: Optional[str] + """ + ID of the Project the dashboard belongs to. + """ + + dashboard_name: str + """ + Name of the dashboard. + """ + + +@dataclass +class GlobalApiGetGrafanaRequest: + """ + Request a Grafana. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class GlobalApiListGrafanaProductDashboardsRequest: + """ + Retrieve a list of available product dashboards. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Page size. + """ + + tags: Optional[List[str]] + """ + Tags to filter for. + """ + + +@dataclass +class GlobalApiListGrafanaUsersRequest: + """ + List all Grafana users. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Page size. + """ + + order_by: Optional[ListGrafanaUsersRequestOrderBy] + """ + Order of the Grafana users. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + +@dataclass +class GlobalApiListPlansRequest: + """ + Retrieve a list of available pricing plans. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Page size. + """ + + order_by: Optional[ListPlansRequestOrderBy] + + +@dataclass +class GlobalApiResetGrafanaUserPasswordRequest: + """ + Reset a Grafana user's password. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + grafana_user_id: int + """ + ID of the Grafana user. + """ + + +@dataclass +class GlobalApiSelectPlanRequest: + """ + Select a specific pricing plan. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + plan_name: Optional[PlanName] + """ + Name of the pricing plan. + """ + + +@dataclass +class GlobalApiSyncGrafanaDataSourcesRequest: + """ + Trigger the synchronization of all data sources created in the relevant regions. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + +@dataclass +class Grafana: + """ + Grafana user. + """ + + grafana_url: str + """ + URL to access your Cockpit's Grafana. + """ + + +@dataclass +class ListContactPointsResponse: + """ + Response returned when listing contact points. + """ + + total_count: int + """ + Total count of contact points associated with the default receiver. + """ + + contact_points: List[ContactPoint] + """ + List of contact points associated with the default receiver. + """ + + has_additional_receivers: bool + """ + Indicates whether the Alert manager has other receivers than the default one. + """ + + has_additional_contact_points: bool + """ + Indicates whether there are unmanaged contact points on the default receiver. + """ + + +@dataclass +class ListDataSourcesResponse: + """ + Response returned when listing data sources. 
+ """ + + total_count: int + """ + Total count of data sources matching the request. + """ + + data_sources: List[DataSource] + """ + Data sources matching the request within the pagination. + """ + + +@dataclass +class ListGrafanaProductDashboardsResponse: + """ + Output returned when listing dashboards. + """ + + total_count: int + """ + Total count of Grafana dashboards. + """ + + dashboards: List[GrafanaProductDashboard] + """ + Grafana dashboards information. + """ + + +@dataclass +class ListGrafanaUsersResponse: + """ + Ouptut returned when listing Grafana users. + """ + + total_count: int + """ + Total count of Grafana users. + """ + + grafana_users: List[GrafanaUser] + """ + Grafana users information. + """ + + +@dataclass +class ListPlansResponse: + """ + Output returned when listing pricing plans. + """ + + total_count: int + """ + Total count of available pricing plans. + """ + + plans: List[Plan] + """ + Plan types information. + """ + + +@dataclass +class ListTokensResponse: + """ + Response returned when listing tokens. + """ + + total_count: int + """ + Total count of tokens matching the request. + """ + + tokens: List[Token] + """ + Tokens matching the request within the pagination. + """ + + +@dataclass +class RegionalApiCreateContactPointRequest: + """ + Create a contact point. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project to create the contact point in. + """ + + email: Optional[ContactPointEmail] + + +@dataclass +class RegionalApiCreateDataSourceRequest: + """ + Create a data source. + """ + + name: str + """ + Data source name. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project the data source belongs to. + """ + + type_: Optional[DataSourceType] + """ + Data source type. + """ + + +@dataclass +class RegionalApiCreateTokenRequest: + """ + Create a token. + """ + + name: str + """ + Name of the token. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project the token belongs to. + """ + + token_scopes: Optional[List[TokenScope]] + """ + Token permission scopes. + """ + + +@dataclass +class RegionalApiDeleteContactPointRequest: + """ + Delete a contact point. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project containing the contact point to delete. + """ + + email: Optional[ContactPointEmail] + + +@dataclass +class RegionalApiDeleteDataSourceRequest: + """ + Delete a data source. + """ + + data_source_id: str + """ + ID of the data source to delete. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiDeleteTokenRequest: + """ + Delete a token. + """ + + token_id: str + """ + ID of the token to delete. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiDisableAlertManagerRequest: + """ + Disable the Alert manager. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. 
+ """ + + project_id: Optional[str] + """ + ID of the Project to disable the Alert manager in. + """ + + +@dataclass +class RegionalApiDisableManagedAlertsRequest: + """ + Disable the sending of managed alerts. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class RegionalApiEnableAlertManagerRequest: + """ + Enable the Alert manager. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project to enable the Alert manager in. + """ + + +@dataclass +class RegionalApiEnableManagedAlertsRequest: + """ + Enable the sending of managed alerts. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class RegionalApiGetAlertManagerRequest: + """ + Get the Alert manager. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + Project ID of the requested Alert manager. + """ + + +@dataclass +class RegionalApiGetDataSourceRequest: + """ + Retrieve a data source. + """ + + data_source_id: str + """ + ID of the relevant data source. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiGetTokenRequest: + """ + Get a token. + """ + + token_id: str + """ + Token ID. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiGetUsageOverviewRequest: + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + + interval: Optional[str] + + +@dataclass +class RegionalApiListContactPointsRequest: + """ + List contact points. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] + """ + Page number to return, from the paginated results. + """ + + page_size: Optional[int] + """ + Total count of contact points to return per page. + """ + + project_id: Optional[str] + """ + ID of the Project containing the contact points to list. + """ + + +@dataclass +class RegionalApiListDataSourcesRequest: + """ + List data sources. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] + """ + Page number to return, from the paginated results. + """ + + page_size: Optional[int] + """ + Number of data sources to return per page. + """ + + order_by: Optional[ListDataSourcesRequestOrderBy] + """ + Sort order for data sources in the response. + """ + + project_id: Optional[str] + """ + Project ID to filter for, only data sources from this Project will be returned. + """ + + origin: Optional[DataSourceOrigin] + """ + Origin to filter for, only data sources with matching origin will be returned. + """ + + types: Optional[List[DataSourceType]] + """ + Types to filter for, only data sources with matching types will be returned. + """ + + +@dataclass +class RegionalApiListTokensRequest: + """ + List tokens. 
+ """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] + """ + Page number to return, from the paginated results. + """ + + page_size: Optional[int] + """ + Number of tokens to return per page. + """ + + order_by: Optional[ListTokensRequestOrderBy] + """ + Order in which to return results. + """ + + project_id: Optional[str] + """ + ID of the Project the tokens belong to. + """ + + token_scopes: Optional[List[TokenScope]] + """ + Token scopes to filter for. + """ + + +@dataclass +class RegionalApiTriggerTestAlertRequest: + """ + Request to trigger a test alert. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class UsageOverview: + scaleway_metrics_usage: Optional[Usage] + + scaleway_logs_usage: Optional[Usage] + + external_metrics_usage: Optional[Usage] + + external_logs_usage: Optional[Usage] + + external_traces_usage: Optional[Usage] diff --git a/scaleway/scaleway/cockpit/v1/__init__.py b/scaleway/scaleway/cockpit/v1/__init__.py new file mode 100644 index 000000000..bb88a5303 --- /dev/null +++ b/scaleway/scaleway/cockpit/v1/__init__.py @@ -0,0 +1,121 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. +from .types import DataSourceOrigin +from .types import DataSourceType +from .types import GrafanaUserRole +from .types import ListDataSourcesRequestOrderBy +from .types import ListGrafanaUsersRequestOrderBy +from .types import ListPlansRequestOrderBy +from .types import ListTokensRequestOrderBy +from .types import PlanName +from .types import TokenScope +from .types import UsageUnit +from .types import ContactPointEmail +from .types import ContactPoint +from .types import DataSource +from .types import GrafanaProductDashboard +from .types import GrafanaUser +from .types import Plan +from .types import Token +from .types import Usage +from .types import AlertManager +from .types import GlobalApiCreateGrafanaUserRequest +from .types import GlobalApiDeleteGrafanaUserRequest +from .types import GlobalApiGetCurrentPlanRequest +from .types import GlobalApiGetGrafanaProductDashboardRequest +from .types import GlobalApiGetGrafanaRequest +from .types import GlobalApiListGrafanaProductDashboardsRequest +from .types import GlobalApiListGrafanaUsersRequest +from .types import GlobalApiListPlansRequest +from .types import GlobalApiResetGrafanaUserPasswordRequest +from .types import GlobalApiSelectPlanRequest +from .types import GlobalApiSyncGrafanaDataSourcesRequest +from .types import Grafana +from .types import ListContactPointsResponse +from .types import ListDataSourcesResponse +from .types import ListGrafanaProductDashboardsResponse +from .types import ListGrafanaUsersResponse +from .types import ListPlansResponse +from .types import ListTokensResponse +from .types import RegionalApiCreateContactPointRequest +from .types import RegionalApiCreateDataSourceRequest +from .types import RegionalApiCreateTokenRequest +from .types import RegionalApiDeleteContactPointRequest +from .types import RegionalApiDeleteDataSourceRequest +from .types import RegionalApiDeleteTokenRequest +from .types import RegionalApiDisableAlertManagerRequest +from .types import RegionalApiDisableManagedAlertsRequest +from .types import RegionalApiEnableAlertManagerRequest +from .types import 
RegionalApiEnableManagedAlertsRequest +from .types import RegionalApiGetAlertManagerRequest +from .types import RegionalApiGetDataSourceRequest +from .types import RegionalApiGetTokenRequest +from .types import RegionalApiGetUsageOverviewRequest +from .types import RegionalApiListContactPointsRequest +from .types import RegionalApiListDataSourcesRequest +from .types import RegionalApiListTokensRequest +from .types import RegionalApiTriggerTestAlertRequest +from .types import UsageOverview +from .api import CockpitV1GlobalAPI +from .api import CockpitV1RegionalAPI + +__all__ = [ + "DataSourceOrigin", + "DataSourceType", + "GrafanaUserRole", + "ListDataSourcesRequestOrderBy", + "ListGrafanaUsersRequestOrderBy", + "ListPlansRequestOrderBy", + "ListTokensRequestOrderBy", + "PlanName", + "TokenScope", + "UsageUnit", + "ContactPointEmail", + "ContactPoint", + "DataSource", + "GrafanaProductDashboard", + "GrafanaUser", + "Plan", + "Token", + "Usage", + "AlertManager", + "GlobalApiCreateGrafanaUserRequest", + "GlobalApiDeleteGrafanaUserRequest", + "GlobalApiGetCurrentPlanRequest", + "GlobalApiGetGrafanaProductDashboardRequest", + "GlobalApiGetGrafanaRequest", + "GlobalApiListGrafanaProductDashboardsRequest", + "GlobalApiListGrafanaUsersRequest", + "GlobalApiListPlansRequest", + "GlobalApiResetGrafanaUserPasswordRequest", + "GlobalApiSelectPlanRequest", + "GlobalApiSyncGrafanaDataSourcesRequest", + "Grafana", + "ListContactPointsResponse", + "ListDataSourcesResponse", + "ListGrafanaProductDashboardsResponse", + "ListGrafanaUsersResponse", + "ListPlansResponse", + "ListTokensResponse", + "RegionalApiCreateContactPointRequest", + "RegionalApiCreateDataSourceRequest", + "RegionalApiCreateTokenRequest", + "RegionalApiDeleteContactPointRequest", + "RegionalApiDeleteDataSourceRequest", + "RegionalApiDeleteTokenRequest", + "RegionalApiDisableAlertManagerRequest", + "RegionalApiDisableManagedAlertsRequest", + "RegionalApiEnableAlertManagerRequest", + "RegionalApiEnableManagedAlertsRequest", + "RegionalApiGetAlertManagerRequest", + "RegionalApiGetDataSourceRequest", + "RegionalApiGetTokenRequest", + "RegionalApiGetUsageOverviewRequest", + "RegionalApiListContactPointsRequest", + "RegionalApiListDataSourcesRequest", + "RegionalApiListTokensRequest", + "RegionalApiTriggerTestAlertRequest", + "UsageOverview", + "CockpitV1GlobalAPI", + "CockpitV1RegionalAPI", +] diff --git a/scaleway/scaleway/cockpit/v1/api.py b/scaleway/scaleway/cockpit/v1/api.py new file mode 100644 index 000000000..6c441862e --- /dev/null +++ b/scaleway/scaleway/cockpit/v1/api.py @@ -0,0 +1,1402 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
+ +from typing import List, Optional + +from scaleway_core.api import API +from scaleway_core.bridge import ( + Region, +) +from scaleway_core.utils import ( + validate_path_param, + fetch_all_pages, +) +from .types import ( + DataSourceOrigin, + DataSourceType, + GrafanaUserRole, + ListDataSourcesRequestOrderBy, + ListGrafanaUsersRequestOrderBy, + ListPlansRequestOrderBy, + ListTokensRequestOrderBy, + PlanName, + TokenScope, + AlertManager, + ContactPoint, + ContactPointEmail, + DataSource, + GlobalApiCreateGrafanaUserRequest, + GlobalApiResetGrafanaUserPasswordRequest, + GlobalApiSelectPlanRequest, + GlobalApiSyncGrafanaDataSourcesRequest, + Grafana, + GrafanaProductDashboard, + GrafanaUser, + ListContactPointsResponse, + ListDataSourcesResponse, + ListGrafanaProductDashboardsResponse, + ListGrafanaUsersResponse, + ListPlansResponse, + ListTokensResponse, + Plan, + RegionalApiCreateContactPointRequest, + RegionalApiCreateDataSourceRequest, + RegionalApiCreateTokenRequest, + RegionalApiDeleteContactPointRequest, + RegionalApiDisableAlertManagerRequest, + RegionalApiDisableManagedAlertsRequest, + RegionalApiEnableAlertManagerRequest, + RegionalApiEnableManagedAlertsRequest, + RegionalApiTriggerTestAlertRequest, + Token, + UsageOverview, +) +from .marshalling import ( + unmarshal_ContactPoint, + unmarshal_DataSource, + unmarshal_GrafanaProductDashboard, + unmarshal_GrafanaUser, + unmarshal_Plan, + unmarshal_Token, + unmarshal_AlertManager, + unmarshal_Grafana, + unmarshal_ListContactPointsResponse, + unmarshal_ListDataSourcesResponse, + unmarshal_ListGrafanaProductDashboardsResponse, + unmarshal_ListGrafanaUsersResponse, + unmarshal_ListPlansResponse, + unmarshal_ListTokensResponse, + unmarshal_UsageOverview, + marshal_GlobalApiCreateGrafanaUserRequest, + marshal_GlobalApiResetGrafanaUserPasswordRequest, + marshal_GlobalApiSelectPlanRequest, + marshal_GlobalApiSyncGrafanaDataSourcesRequest, + marshal_RegionalApiCreateContactPointRequest, + marshal_RegionalApiCreateDataSourceRequest, + marshal_RegionalApiCreateTokenRequest, + marshal_RegionalApiDeleteContactPointRequest, + marshal_RegionalApiDisableAlertManagerRequest, + marshal_RegionalApiDisableManagedAlertsRequest, + marshal_RegionalApiEnableAlertManagerRequest, + marshal_RegionalApiEnableManagedAlertsRequest, + marshal_RegionalApiTriggerTestAlertRequest, +) + + +class CockpitV1GlobalAPI(API): + """ + The Cockpit Global API allows you to manage your Cockpit's Grafana and plans. + """ + + def get_grafana( + self, + *, + project_id: Optional[str] = None, + ) -> Grafana: + """ + Get your Cockpit's Grafana. + Retrieve information on your Cockpit's Grafana, specified by the ID of the Project the Cockpit belongs to. + The output returned displays the URL to access your Cockpit's Grafana. + :param project_id: ID of the Project. + :return: :class:`Grafana ` + + Usage: + :: + + result = api.get_grafana() + """ + + res = self._request( + "GET", + "/cockpit/v1/grafana", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_Grafana(res.json()) + + def sync_grafana_data_sources( + self, + *, + project_id: Optional[str] = None, + ) -> None: + """ + Synchronize Grafana data sources. + Trigger the synchronization of all your data sources and the alert manager in the relevant regions. The alert manager will only be synchronized if you have enabled it. + :param project_id: ID of the Project to target. 
+ + Usage: + :: + + result = api.sync_grafana_data_sources() + """ + + res = self._request( + "POST", + "/cockpit/v1/grafana/sync-data-sources", + body=marshal_GlobalApiSyncGrafanaDataSourcesRequest( + GlobalApiSyncGrafanaDataSourcesRequest( + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def create_grafana_user( + self, + *, + login: str, + project_id: Optional[str] = None, + role: Optional[GrafanaUserRole] = None, + ) -> GrafanaUser: + """ + Create a Grafana user. + Create a Grafana user to connect to your Cockpit's Grafana. Upon creation, your user password displays only once, so make sure that you save it. + Each Grafana user is associated with a role: viewer or editor. A viewer can only view dashboards, whereas an editor can create and edit dashboards. Note that the `admin` username is not available for creation. + :param login: Username of the Grafana user. Note that the `admin` username is not available for creation. + :param project_id: ID of the Project in which to create the Grafana user. + :param role: Role assigned to the Grafana user. + :return: :class:`GrafanaUser ` + + Usage: + :: + + result = api.create_grafana_user( + login="example", + ) + """ + + res = self._request( + "POST", + "/cockpit/v1/grafana/users", + body=marshal_GlobalApiCreateGrafanaUserRequest( + GlobalApiCreateGrafanaUserRequest( + login=login, + project_id=project_id, + role=role, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_GrafanaUser(res.json()) + + def list_grafana_users( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListGrafanaUsersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> ListGrafanaUsersResponse: + """ + List Grafana users. + List all Grafana users created in your Cockpit's Grafana. By default, the Grafana users returned in the list are ordered in ascending order. + :param page: Page number. + :param page_size: Page size. + :param order_by: Order of the Grafana users. + :param project_id: ID of the Project to target. + :return: :class:`ListGrafanaUsersResponse ` + + Usage: + :: + + result = api.list_grafana_users() + """ + + res = self._request( + "GET", + "/cockpit/v1/grafana/users", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListGrafanaUsersResponse(res.json()) + + def list_grafana_users_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListGrafanaUsersRequestOrderBy] = None, + project_id: Optional[str] = None, + ) -> List[GrafanaUser]: + """ + List Grafana users. + List all Grafana users created in your Cockpit's Grafana. By default, the Grafana users returned in the list are ordered in ascending order. + :param page: Page number. + :param page_size: Page size. + :param order_by: Order of the Grafana users. + :param project_id: ID of the Project to target. 
+ :return: :class:`List[GrafanaUser] ` + + Usage: + :: + + result = api.list_grafana_users_all() + """ + + return fetch_all_pages( + type=ListGrafanaUsersResponse, + key="grafana_users", + fetcher=self.list_grafana_users, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + }, + ) + + def delete_grafana_user( + self, + *, + project_id: Optional[str] = None, + grafana_user_id: int, + ) -> None: + """ + Delete a Grafana user. + Delete a Grafana user from your Cockpit's Grafana, specified by the ID of the Project the Cockpit belongs to, and the ID of the Grafana user. + :param project_id: ID of the Project to target. + :param grafana_user_id: ID of the Grafana user. + + Usage: + :: + + result = api.delete_grafana_user( + grafana_user_id=1, + ) + """ + + param_grafana_user_id = validate_path_param("grafana_user_id", grafana_user_id) + + res = self._request( + "DELETE", + f"/cockpit/v1/grafana/users/{param_grafana_user_id}", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + + def reset_grafana_user_password( + self, + *, + project_id: Optional[str] = None, + grafana_user_id: int, + ) -> GrafanaUser: + """ + Reset a Grafana user password. + Reset the password of a Grafana user, specified by the ID of the Project the Cockpit belongs to, and the ID of the Grafana user. + A new password regenerates and only displays once. Make sure that you save it. + :param project_id: ID of the Project to target. + :param grafana_user_id: ID of the Grafana user. + :return: :class:`GrafanaUser ` + + Usage: + :: + + result = api.reset_grafana_user_password( + grafana_user_id=1, + ) + """ + + param_grafana_user_id = validate_path_param("grafana_user_id", grafana_user_id) + + res = self._request( + "POST", + f"/cockpit/v1/grafana/users/{param_grafana_user_id}/reset-password", + body=marshal_GlobalApiResetGrafanaUserPasswordRequest( + GlobalApiResetGrafanaUserPasswordRequest( + project_id=project_id, + grafana_user_id=grafana_user_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_GrafanaUser(res.json()) + + def list_grafana_product_dashboards( + self, + *, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + tags: Optional[List[str]] = None, + ) -> ListGrafanaProductDashboardsResponse: + """ + List Scaleway resources dashboards. + Retrieve a list of available dashboards in Grafana, for all Scaleway resources which are integrated with Cockpit. + :param project_id: ID of the Project to target. + :param page: Page number. + :param page_size: Page size. + :param tags: Tags to filter for. + :return: :class:`ListGrafanaProductDashboardsResponse ` + + Usage: + :: + + result = api.list_grafana_product_dashboards() + """ + + res = self._request( + "GET", + "/cockpit/v1/grafana/product-dashboards", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "tags": tags, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListGrafanaProductDashboardsResponse(res.json()) + + def list_grafana_product_dashboards_all( + self, + *, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + tags: Optional[List[str]] = None, + ) -> List[GrafanaProductDashboard]: + """ + List Scaleway resources dashboards. 
+ Retrieve a list of available dashboards in Grafana, for all Scaleway resources which are integrated with Cockpit. + :param project_id: ID of the Project to target. + :param page: Page number. + :param page_size: Page size. + :param tags: Tags to filter for. + :return: :class:`List[GrafanaProductDashboard] ` + + Usage: + :: + + result = api.list_grafana_product_dashboards_all() + """ + + return fetch_all_pages( + type=ListGrafanaProductDashboardsResponse, + key="dashboards", + fetcher=self.list_grafana_product_dashboards, + args={ + "project_id": project_id, + "page": page, + "page_size": page_size, + "tags": tags, + }, + ) + + def get_grafana_product_dashboard( + self, + *, + project_id: Optional[str] = None, + dashboard_name: str, + ) -> GrafanaProductDashboard: + """ + Get Scaleway resource dashboard. + Retrieve information about the dashboard of a Scaleway resource in Grafana, specified by the ID of the Project the Cockpit belongs to, and the name of the dashboard. + :param project_id: ID of the Project the dashboard belongs to. + :param dashboard_name: Name of the dashboard. + :return: :class:`GrafanaProductDashboard ` + + Usage: + :: + + result = api.get_grafana_product_dashboard( + dashboard_name="example", + ) + """ + + param_dashboard_name = validate_path_param("dashboard_name", dashboard_name) + + res = self._request( + "GET", + f"/cockpit/v1/grafana/product-dashboards/{param_dashboard_name}", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_GrafanaProductDashboard(res.json()) + + def list_plans( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListPlansRequestOrderBy] = None, + ) -> ListPlansResponse: + """ + List plan types. + Retrieve a list of available pricing plan types. + :param page: Page number. + :param page_size: Page size. + :param order_by: + :return: :class:`ListPlansResponse ` + + Usage: + :: + + result = api.list_plans() + """ + + res = self._request( + "GET", + "/cockpit/v1/plans", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListPlansResponse(res.json()) + + def list_plans_all( + self, + *, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListPlansRequestOrderBy] = None, + ) -> List[Plan]: + """ + List plan types. + Retrieve a list of available pricing plan types. + :param page: Page number. + :param page_size: Page size. + :param order_by: + :return: :class:`List[Plan] ` + + Usage: + :: + + result = api.list_plans_all() + """ + + return fetch_all_pages( + type=ListPlansResponse, + key="plans", + fetcher=self.list_plans, + args={ + "page": page, + "page_size": page_size, + "order_by": order_by, + }, + ) + + def select_plan( + self, + *, + project_id: Optional[str] = None, + plan_name: Optional[PlanName] = None, + ) -> Plan: + """ + Apply a pricing plan. + Apply a pricing plan on a given Project. You must specify the ID of the pricing plan type. Note that you will be billed for the plan you apply. + :param project_id: ID of the Project. + :param plan_name: Name of the pricing plan. 
+ :return: :class:`Plan ` + + Usage: + :: + + result = api.select_plan() + """ + + res = self._request( + "PATCH", + "/cockpit/v1/plans", + body=marshal_GlobalApiSelectPlanRequest( + GlobalApiSelectPlanRequest( + project_id=project_id, + plan_name=plan_name, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Plan(res.json()) + + def get_current_plan( + self, + *, + project_id: Optional[str] = None, + ) -> Plan: + """ + Get current plan. + Retrieve a pricing plan for the given Project, specified by the ID of the Project. + :param project_id: ID of the Project. + :return: :class:`Plan ` + + Usage: + :: + + result = api.get_current_plan() + """ + + res = self._request( + "GET", + "/cockpit/v1/current-plan", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_Plan(res.json()) + + +class CockpitV1RegionalAPI(API): + """ + The Cockpit Regional API allows you to create data sources and tokens to store and query data types such as metrics, logs, and traces. You can also push your data into Cockpit, and send alerts to your contact points when your resources may require your attention, using the regional Alert manager. + """ + + def create_data_source( + self, + *, + name: str, + region: Optional[Region] = None, + project_id: Optional[str] = None, + type_: Optional[DataSourceType] = None, + ) -> DataSource: + """ + Create a data source. + You must specify the data source type upon creation. Available data source types include: + - metrics + - logs + - traces + The name of the data source will then be used as reference to name the associated Grafana data source. + :param name: Data source name. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project the data source belongs to. + :param type_: Data source type. + :return: :class:`DataSource ` + + Usage: + :: + + result = api.create_data_source( + name="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/data-sources", + body=marshal_RegionalApiCreateDataSourceRequest( + RegionalApiCreateDataSourceRequest( + name=name, + region=region, + project_id=project_id, + type_=type_, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_DataSource(res.json()) + + def get_data_source( + self, + *, + data_source_id: str, + region: Optional[Region] = None, + ) -> DataSource: + """ + Get a data source. + Retrieve information about a given data source, specified by the data source ID. The data source's information such as its name, type, URL, origin, and retention period, is returned. + :param data_source_id: ID of the relevant data source. + :param region: Region to target. If none is passed will use default region from the config. 
+ :return: :class:`DataSource ` + + Usage: + :: + + result = api.get_data_source( + data_source_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_data_source_id = validate_path_param("data_source_id", data_source_id) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/data-sources/{param_data_source_id}", + ) + + self._throw_on_error(res) + return unmarshal_DataSource(res.json()) + + def delete_data_source( + self, + *, + data_source_id: str, + region: Optional[Region] = None, + ) -> None: + """ + Delete a data source. + Delete a given data source, specified by the data source ID. Note that deleting a data source is irreversible, and cannot be undone. + :param data_source_id: ID of the data source to delete. + :param region: Region to target. If none is passed will use default region from the config. + + Usage: + :: + + result = api.delete_data_source( + data_source_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_data_source_id = validate_path_param("data_source_id", data_source_id) + + res = self._request( + "DELETE", + f"/cockpit/v1/regions/{param_region}/data-sources/{param_data_source_id}", + ) + + self._throw_on_error(res) + + def list_data_sources( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListDataSourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + origin: Optional[DataSourceOrigin] = None, + types: Optional[List[DataSourceType]] = None, + ) -> ListDataSourcesResponse: + """ + List data sources. + Retrieve the list of data sources available in the specified region. By default, the data sources returned in the list are ordered by creation date, in ascending order. + You can list data sources by Project, type and origin. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of data sources to return per page. + :param order_by: Sort order for data sources in the response. + :param project_id: Project ID to filter for, only data sources from this Project will be returned. + :param origin: Origin to filter for, only data sources with matching origin will be returned. + :param types: Types to filter for, only data sources with matching types will be returned. + :return: :class:`ListDataSourcesResponse ` + + Usage: + :: + + result = api.list_data_sources() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/data-sources", + params={ + "order_by": order_by, + "origin": origin, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "types": types, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListDataSourcesResponse(res.json()) + + def list_data_sources_all( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListDataSourcesRequestOrderBy] = None, + project_id: Optional[str] = None, + origin: Optional[DataSourceOrigin] = None, + types: Optional[List[DataSourceType]] = None, + ) -> List[DataSource]: + """ + List data sources. 
+ Retrieve the list of data sources available in the specified region. By default, the data sources returned in the list are ordered by creation date, in ascending order. + You can list data sources by Project, type and origin. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of data sources to return per page. + :param order_by: Sort order for data sources in the response. + :param project_id: Project ID to filter for, only data sources from this Project will be returned. + :param origin: Origin to filter for, only data sources with matching origin will be returned. + :param types: Types to filter for, only data sources with matching types will be returned. + :return: :class:`List[DataSource] ` + + Usage: + :: + + result = api.list_data_sources_all() + """ + + return fetch_all_pages( + type=ListDataSourcesResponse, + key="data_sources", + fetcher=self.list_data_sources, + args={ + "region": region, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + "origin": origin, + "types": types, + }, + ) + + def get_usage_overview( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + interval: Optional[str] = None, + ) -> UsageOverview: + """ + Get data source usage overview. + Retrieve the data source usage overview per type for the specified Project. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: + :param interval: + :return: :class:`UsageOverview ` + + Usage: + :: + + result = api.get_usage_overview() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/usage-overview", + params={ + "interval": interval, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_UsageOverview(res.json()) + + def create_token( + self, + *, + name: str, + region: Optional[Region] = None, + project_id: Optional[str] = None, + token_scopes: Optional[List[TokenScope]] = None, + ) -> Token: + """ + Create a token. + Give your token the relevant scopes to ensure it has the right permissions to interact with your data sources and the Alert manager. Make sure that you create your token in the same regions as the data sources you want to use it for. + Upon creation, your token's secret key display only once. Make sure that you save it. + :param name: Name of the token. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project the token belongs to. + :param token_scopes: Token permission scopes. 
+ :return: :class:`Token ` + + Usage: + :: + + result = api.create_token( + name="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/tokens", + body=marshal_RegionalApiCreateTokenRequest( + RegionalApiCreateTokenRequest( + name=name, + region=region, + project_id=project_id, + token_scopes=token_scopes, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Token(res.json()) + + def list_tokens( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListTokensRequestOrderBy] = None, + project_id: Optional[str] = None, + token_scopes: Optional[List[TokenScope]] = None, + ) -> ListTokensResponse: + """ + List tokens. + Retrieve a list of all tokens in the specified region. By default, tokens returned in the list are ordered by creation date, in ascending order. + You can filter tokens by Project ID and token scopes. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of tokens to return per page. + :param order_by: Order in which to return results. + :param project_id: ID of the Project the tokens belong to. + :param token_scopes: Token scopes to filter for. + :return: :class:`ListTokensResponse ` + + Usage: + :: + + result = api.list_tokens() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/tokens", + params={ + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "token_scopes": token_scopes, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListTokensResponse(res.json()) + + def list_tokens_all( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListTokensRequestOrderBy] = None, + project_id: Optional[str] = None, + token_scopes: Optional[List[TokenScope]] = None, + ) -> List[Token]: + """ + List tokens. + Retrieve a list of all tokens in the specified region. By default, tokens returned in the list are ordered by creation date, in ascending order. + You can filter tokens by Project ID and token scopes. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Number of tokens to return per page. + :param order_by: Order in which to return results. + :param project_id: ID of the Project the tokens belong to. + :param token_scopes: Token scopes to filter for. + :return: :class:`List[Token] ` + + Usage: + :: + + result = api.list_tokens_all() + """ + + return fetch_all_pages( + type=ListTokensResponse, + key="tokens", + fetcher=self.list_tokens, + args={ + "region": region, + "page": page, + "page_size": page_size, + "order_by": order_by, + "project_id": project_id, + "token_scopes": token_scopes, + }, + ) + + def get_token( + self, + *, + token_id: str, + region: Optional[Region] = None, + ) -> Token: + """ + Get a token. + Retrieve information about a given token, specified by the token ID. The token's information such as its scopes, is returned. + :param token_id: Token ID. 
+ :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Token ` + + Usage: + :: + + result = api.get_token( + token_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_token_id = validate_path_param("token_id", token_id) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/tokens/{param_token_id}", + ) + + self._throw_on_error(res) + return unmarshal_Token(res.json()) + + def delete_token( + self, + *, + token_id: str, + region: Optional[Region] = None, + ) -> None: + """ + Delete a token. + Delete a given token, specified by the token ID. Deleting a token is irreversible and cannot be undone. + :param token_id: ID of the token to delete. + :param region: Region to target. If none is passed will use default region from the config. + + Usage: + :: + + result = api.delete_token( + token_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_token_id = validate_path_param("token_id", token_id) + + res = self._request( + "DELETE", + f"/cockpit/v1/regions/{param_region}/tokens/{param_token_id}", + ) + + self._throw_on_error(res) + + def get_alert_manager( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Get the Alert manager. + Retrieve information about the Alert manager which is unique per Project and region. By default the Alert manager is disabled. + The output returned displays a URL to access the Alert manager, and whether the Alert manager and managed alerts are enabled. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: Project ID of the requested Alert manager. + :return: :class:`AlertManager ` + + Usage: + :: + + result = api.get_alert_manager() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/alert-manager", + params={ + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + def enable_alert_manager( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Enable the Alert manager. + Enabling the Alert manager allows you to enable managed alerts and create contact points in the specified Project and region, to be notified when your Scaleway resources may require your attention. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to enable the Alert manager in. + :return: :class:`AlertManager ` + + Usage: + :: + + result = api.enable_alert_manager() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/enable", + body=marshal_RegionalApiEnableAlertManagerRequest( + RegionalApiEnableAlertManagerRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + def disable_alert_manager( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Disable the Alert manager. 
+ Disabling the Alert manager deletes the contact points you have created and disables managed alerts in the specified Project and region. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to disable the Alert manager in. + :return: :class:`AlertManager ` + + Usage: + :: + + result = api.disable_alert_manager() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/disable", + body=marshal_RegionalApiDisableAlertManagerRequest( + RegionalApiDisableAlertManagerRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + def create_contact_point( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + email: Optional[ContactPointEmail] = None, + ) -> ContactPoint: + """ + Create a contact point. + Contact points are email addresses associated with the default receiver, that the Alert manager sends alerts to. + The source of the alerts are data sources within the same Project and region as the Alert manager. + If you need to receive alerts for other receivers, you can create additional contact points and receivers in Grafana. Make sure that you select the Scaleway Alert manager. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project to create the contact point in. + :param email: Email address of the contact point to create. + One-Of ('configuration'): at most one of 'email' could be set. + :return: :class:`ContactPoint ` + + Usage: + :: + + result = api.create_contact_point() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/contact-points", + body=marshal_RegionalApiCreateContactPointRequest( + RegionalApiCreateContactPointRequest( + region=region, + project_id=project_id, + email=email, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_ContactPoint(res.json()) + + def list_contact_points( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + project_id: Optional[str] = None, + ) -> ListContactPointsResponse: + """ + List contact points. + Retrieve a list of contact points for the specified Project. The response lists all contact points and receivers created in Grafana or via the API. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Total count of contact points to return per page. + :param project_id: ID of the Project containing the contact points to list. 
+ :return: :class:`ListContactPointsResponse ` + + Usage: + :: + + result = api.list_contact_points() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/cockpit/v1/regions/{param_region}/alert-manager/contact-points", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListContactPointsResponse(res.json()) + + def list_contact_points_all( + self, + *, + region: Optional[Region] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + project_id: Optional[str] = None, + ) -> List[ContactPoint]: + """ + List contact points. + Retrieve a list of contact points for the specified Project. The response lists all contact points and receivers created in Grafana or via the API. + :param region: Region to target. If none is passed will use default region from the config. + :param page: Page number to return, from the paginated results. + :param page_size: Total count of contact points to return per page. + :param project_id: ID of the Project containing the contact points to list. + :return: :class:`List[ContactPoint] ` + + Usage: + :: + + result = api.list_contact_points_all() + """ + + return fetch_all_pages( + type=ListContactPointsResponse, + key="contact_points", + fetcher=self.list_contact_points, + args={ + "region": region, + "page": page, + "page_size": page_size, + "project_id": project_id, + }, + ) + + def delete_contact_point( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + email: Optional[ContactPointEmail] = None, + ) -> None: + """ + Delete a contact point. + Delete a contact point associated with the default receiver. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project containing the contact point to delete. + :param email: Email address of the contact point to delete. + One-Of ('configuration'): at most one of 'email' could be set. + + Usage: + :: + + result = api.delete_contact_point() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/contact-points/delete", + body=marshal_RegionalApiDeleteContactPointRequest( + RegionalApiDeleteContactPointRequest( + region=region, + project_id=project_id, + email=email, + ), + self.client, + ), + ) + + self._throw_on_error(res) + + def enable_managed_alerts( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Enable managed alerts. + Enable the sending of managed alerts for the specified Project. Managed alerts are predefined alerts that apply to Scaleway recources integrated with Cockpit by default. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project. 
+ :return: :class:`AlertManager ` + + Usage: + :: + + result = api.enable_managed_alerts() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/managed-alerts/enable", + body=marshal_RegionalApiEnableManagedAlertsRequest( + RegionalApiEnableManagedAlertsRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + def disable_managed_alerts( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> AlertManager: + """ + Disable managed alerts. + Disable the sending of managed alerts for the specified Project. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project. + :return: :class:`AlertManager ` + + Usage: + :: + + result = api.disable_managed_alerts() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/managed-alerts/disable", + body=marshal_RegionalApiDisableManagedAlertsRequest( + RegionalApiDisableManagedAlertsRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_AlertManager(res.json()) + + def trigger_test_alert( + self, + *, + region: Optional[Region] = None, + project_id: Optional[str] = None, + ) -> None: + """ + Trigger a test alert. + Send a test alert to the Alert manager to make sure your contact points get notified. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: ID of the Project. + + Usage: + :: + + result = api.trigger_test_alert() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/cockpit/v1/regions/{param_region}/alert-manager/trigger-test-alert", + body=marshal_RegionalApiTriggerTestAlertRequest( + RegionalApiTriggerTestAlertRequest( + region=region, + project_id=project_id, + ), + self.client, + ), + ) + + self._throw_on_error(res) diff --git a/scaleway/scaleway/cockpit/v1/marshalling.py b/scaleway/scaleway/cockpit/v1/marshalling.py new file mode 100644 index 000000000..12bc2dd4b --- /dev/null +++ b/scaleway/scaleway/cockpit/v1/marshalling.py @@ -0,0 +1,730 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
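Taken together, the new regional methods follow the SDK's usual client/API pattern. A minimal usage sketch, assuming the standard `Client.from_config_file_and_env()` constructor and that the new names are re-exported from `scaleway.cockpit.v1` (the data source and token names are illustrative)::

    from scaleway import Client
    from scaleway.cockpit.v1 import CockpitV1RegionalAPI, DataSourceType, TokenScope

    client = Client.from_config_file_and_env()
    api = CockpitV1RegionalAPI(client)

    # Create a metrics data source in the default region and Project.
    data_source = api.create_data_source(
        name="my-metrics",
        type_=DataSourceType.METRICS,
    )

    # Create a token scoped to reading metrics; the secret key is returned only once.
    token = api.create_token(
        name="my-read-only-token",
        token_scopes=[TokenScope.READ_ONLY_METRICS],
    )
    print(token.secret_key)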
+ +from typing import Any, Dict +from dateutil import parser + +from scaleway_core.profile import ProfileDefaults +from scaleway_core.utils import ( + OneOfPossibility, + resolve_one_of, +) +from .types import ( + TokenScope, + ContactPointEmail, + ContactPoint, + DataSource, + GrafanaProductDashboard, + GrafanaUser, + Plan, + Token, + AlertManager, + Grafana, + ListContactPointsResponse, + ListDataSourcesResponse, + ListGrafanaProductDashboardsResponse, + ListGrafanaUsersResponse, + ListPlansResponse, + ListTokensResponse, + Usage, + UsageOverview, + GlobalApiCreateGrafanaUserRequest, + GlobalApiResetGrafanaUserPasswordRequest, + GlobalApiSelectPlanRequest, + GlobalApiSyncGrafanaDataSourcesRequest, + RegionalApiCreateContactPointRequest, + RegionalApiCreateDataSourceRequest, + RegionalApiCreateTokenRequest, + RegionalApiDeleteContactPointRequest, + RegionalApiDisableAlertManagerRequest, + RegionalApiDisableManagedAlertsRequest, + RegionalApiEnableAlertManagerRequest, + RegionalApiEnableManagedAlertsRequest, + RegionalApiTriggerTestAlertRequest, +) + + +def unmarshal_ContactPointEmail(data: Any) -> ContactPointEmail: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ContactPointEmail' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("to", None) + if field is not None: + args["to"] = field + + return ContactPointEmail(**args) + + +def unmarshal_ContactPoint(data: Any) -> ContactPoint: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ContactPoint' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("email", None) + if field is not None: + args["email"] = unmarshal_ContactPointEmail(field) + + return ContactPoint(**args) + + +def unmarshal_DataSource(data: Any) -> DataSource: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'DataSource' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("url", None) + if field is not None: + args["url"] = field + + field = data.get("type_", None) + if field is not None: + args["type_"] = field + + field = data.get("origin", None) + if field is not None: + args["origin"] = field + + field = data.get("synchronized_with_grafana", None) + if field is not None: + args["synchronized_with_grafana"] = field + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + return DataSource(**args) + + +def unmarshal_GrafanaProductDashboard(data: Any) -> GrafanaProductDashboard: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GrafanaProductDashboard' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("title", None) + if field is not None: + args["title"] = field + + field = data.get("url", None) + if field is not None: + args["url"] = field + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + + field = data.get("variables", None) + if field is not None: + args["variables"] = field + + return GrafanaProductDashboard(**args) + + +def unmarshal_GrafanaUser(data: Any) -> GrafanaUser: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'GrafanaUser' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("login", None) + if field is not None: + args["login"] = field + + field = data.get("role", None) + if field is not None: + args["role"] = field + + field = data.get("password", None) + if field is not None: + args["password"] = field + + return GrafanaUser(**args) + + +def unmarshal_Plan(data: Any) -> Plan: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Plan' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("sample_ingestion_price", None) + if field is not None: + args["sample_ingestion_price"] = field + + field = data.get("logs_ingestion_price", None) + if field is not None: + args["logs_ingestion_price"] = field + + field = data.get("traces_ingestion_price", None) + if field is not None: + args["traces_ingestion_price"] = field + + field = data.get("monthly_price", None) + if field is not None: + args["monthly_price"] = field + + field = data.get("retention_metrics_interval", None) + if field is not None: + args["retention_metrics_interval"] = field + + field = data.get("retention_logs_interval", None) + if field is not None: + args["retention_logs_interval"] = field + + field = data.get("retention_traces_interval", None) + if field is not None: + args["retention_traces_interval"] = field + + return Plan(**args) + + +def unmarshal_Token(data: Any) -> Token: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Token' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("name", None) + if field is not None: + args["name"] = field + + field = data.get("scopes", None) + if field is not None: + args["scopes"] = [TokenScope(v) for v in field] if field is not None else None + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + + field = data.get("secret_key", None) + if field is not None: + args["secret_key"] = field + + return Token(**args) + + +def unmarshal_AlertManager(data: Any) -> AlertManager: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'AlertManager' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("alert_manager_enabled", None) + if field is not None: + args["alert_manager_enabled"] = field + + field = data.get("managed_alerts_enabled", None) + if field is not None: + args["managed_alerts_enabled"] = field + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("alert_manager_url", None) + if field is not None: + args["alert_manager_url"] = field + + return AlertManager(**args) + + +def unmarshal_Grafana(data: Any) -> Grafana: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Grafana' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("grafana_url", None) + if field is not None: + args["grafana_url"] = field + + return Grafana(**args) + + +def unmarshal_ListContactPointsResponse(data: Any) -> ListContactPointsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListContactPointsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("contact_points", None) + if field is not None: + args["contact_points"] = ( + [unmarshal_ContactPoint(v) for v in field] if field is not None else None + ) + + field = data.get("has_additional_receivers", None) + if field is not None: + args["has_additional_receivers"] = field + + field = data.get("has_additional_contact_points", None) + if field is not None: + args["has_additional_contact_points"] = field + + return ListContactPointsResponse(**args) + + +def unmarshal_ListDataSourcesResponse(data: Any) -> ListDataSourcesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListDataSourcesResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("data_sources", None) + if field is not None: + args["data_sources"] = ( + [unmarshal_DataSource(v) for v in field] if field is not None else None + ) + + return ListDataSourcesResponse(**args) + + +def unmarshal_ListGrafanaProductDashboardsResponse( + data: Any, +) -> ListGrafanaProductDashboardsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListGrafanaProductDashboardsResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("dashboards", None) + if field is not None: + args["dashboards"] = ( + [unmarshal_GrafanaProductDashboard(v) for v in field] + if field is not None + else None + ) + + return ListGrafanaProductDashboardsResponse(**args) + + +def unmarshal_ListGrafanaUsersResponse(data: Any) -> ListGrafanaUsersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListGrafanaUsersResponse' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("grafana_users", None) + if field is not None: + args["grafana_users"] = ( + [unmarshal_GrafanaUser(v) for v in field] if field is not None else None + ) + + return ListGrafanaUsersResponse(**args) + + +def unmarshal_ListPlansResponse(data: Any) -> ListPlansResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListPlansResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("plans", None) + if field is not None: + args["plans"] = ( + [unmarshal_Plan(v) for v in field] if field is not None else None + ) + + return ListPlansResponse(**args) + + +def unmarshal_ListTokensResponse(data: Any) -> ListTokensResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListTokensResponse' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + + field = data.get("tokens", None) + if field is not None: + args["tokens"] = ( + [unmarshal_Token(v) for v in field] if field is not None else None + ) + + return ListTokensResponse(**args) + + +def unmarshal_Usage(data: Any) -> Usage: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Usage' failed as data isn't a dictionary." + ) + + args: Dict[str, Any] = {} + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + + field = data.get("data_source_origin", None) + if field is not None: + args["data_source_origin"] = field + + field = data.get("data_source_type", None) + if field is not None: + args["data_source_type"] = field + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + + field = data.get("quantity_over_interval", None) + if field is not None: + args["quantity_over_interval"] = field + + field = data.get("region", None) + if field is not None: + args["region"] = field + + field = data.get("data_source_id", None) + if field is not None: + args["data_source_id"] = field + + field = data.get("interval", None) + if field is not None: + args["interval"] = field + + return Usage(**args) + + +def unmarshal_UsageOverview(data: Any) -> UsageOverview: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'UsageOverview' failed as data isn't a dictionary." 
+ ) + + args: Dict[str, Any] = {} + + field = data.get("scaleway_metrics_usage", None) + if field is not None: + args["scaleway_metrics_usage"] = unmarshal_Usage(field) + + field = data.get("scaleway_logs_usage", None) + if field is not None: + args["scaleway_logs_usage"] = unmarshal_Usage(field) + + field = data.get("external_metrics_usage", None) + if field is not None: + args["external_metrics_usage"] = unmarshal_Usage(field) + + field = data.get("external_logs_usage", None) + if field is not None: + args["external_logs_usage"] = unmarshal_Usage(field) + + field = data.get("external_traces_usage", None) + if field is not None: + args["external_traces_usage"] = unmarshal_Usage(field) + + return UsageOverview(**args) + + +def marshal_GlobalApiCreateGrafanaUserRequest( + request: GlobalApiCreateGrafanaUserRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.login is not None: + output["login"] = request.login + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.role is not None: + output["role"] = str(request.role) + + return output + + +def marshal_GlobalApiResetGrafanaUserPasswordRequest( + request: GlobalApiResetGrafanaUserPasswordRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_GlobalApiSelectPlanRequest( + request: GlobalApiSelectPlanRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.plan_name is not None: + output["plan_name"] = str(request.plan_name) + + return output + + +def marshal_GlobalApiSyncGrafanaDataSourcesRequest( + request: GlobalApiSyncGrafanaDataSourcesRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_ContactPointEmail( + request: ContactPointEmail, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.to is not None: + output["to"] = request.to + + return output + + +def marshal_RegionalApiCreateContactPointRequest( + request: RegionalApiCreateContactPointRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + output.update( + resolve_one_of( + [ + OneOfPossibility("email", request.email), + ] + ), + ) + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiCreateDataSourceRequest( + request: RegionalApiCreateDataSourceRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.type_ is not None: + output["type"] = str(request.type_) + + return output + + +def marshal_RegionalApiCreateTokenRequest( + request: RegionalApiCreateTokenRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.project_id 
is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + if request.token_scopes is not None: + output["token_scopes"] = [str(item) for item in request.token_scopes] + + return output + + +def marshal_RegionalApiDeleteContactPointRequest( + request: RegionalApiDeleteContactPointRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + output.update( + resolve_one_of( + [ + OneOfPossibility("email", request.email), + ] + ), + ) + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiDisableAlertManagerRequest( + request: RegionalApiDisableAlertManagerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiDisableManagedAlertsRequest( + request: RegionalApiDisableManagedAlertsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiEnableAlertManagerRequest( + request: RegionalApiEnableAlertManagerRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiEnableManagedAlertsRequest( + request: RegionalApiEnableManagedAlertsRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output + + +def marshal_RegionalApiTriggerTestAlertRequest( + request: RegionalApiTriggerTestAlertRequest, + defaults: ProfileDefaults, +) -> Dict[str, Any]: + output: Dict[str, Any] = {} + + if request.project_id is not None: + output["project_id"] = request.project_id or defaults.default_project_id + + return output diff --git a/scaleway/scaleway/cockpit/v1/types.py b/scaleway/scaleway/cockpit/v1/types.py new file mode 100644 index 000000000..e63357577 --- /dev/null +++ b/scaleway/scaleway/cockpit/v1/types.py @@ -0,0 +1,1135 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
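Each unmarshal helper above follows the same shape: reject non-dict payloads, copy the known fields, and coerce enums and timestamps. As a concrete illustration, here is roughly what that means for `unmarshal_Token` (the payload values are invented)::

    from scaleway.cockpit.v1.marshalling import unmarshal_Token
    from scaleway.cockpit.v1.types import TokenScope

    payload = {
        "id": "11111111-1111-1111-1111-111111111111",
        "project_id": "22222222-2222-2222-2222-222222222222",
        "name": "my-read-only-token",
        "scopes": ["read_only_metrics"],
        "region": "fr-par",
        "created_at": "2024-04-01T12:00:00.000000Z",
        "updated_at": "2024-04-01T12:00:00.000000Z",
        "secret_key": "example-secret-key",
    }

    token = unmarshal_Token(payload)
    # String values are coerced into their enum and datetime counterparts.
    assert token.scopes == [TokenScope.READ_ONLY_METRICS]
    assert token.created_at is not None and token.created_at.year == 2024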
+from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +from typing import List, Optional + +from scaleway_core.bridge import ( + Region, +) +from scaleway_core.utils import ( + StrEnumMeta, +) + + +class DataSourceOrigin(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_ORIGIN = "unknown_origin" + SCALEWAY = "scaleway" + EXTERNAL = "external" + + def __str__(self) -> str: + return str(self.value) + + +class DataSourceType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + METRICS = "metrics" + LOGS = "logs" + TRACES = "traces" + + def __str__(self) -> str: + return str(self.value) + + +class GrafanaUserRole(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_ROLE = "unknown_role" + EDITOR = "editor" + VIEWER = "viewer" + + def __str__(self) -> str: + return str(self.value) + + +class ListDataSourcesRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + TYPE_ASC = "type_asc" + TYPE_DESC = "type_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListGrafanaUsersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + LOGIN_ASC = "login_asc" + LOGIN_DESC = "login_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListPlansRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListTokensRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + + def __str__(self) -> str: + return str(self.value) + + +class PlanName(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_NAME = "unknown_name" + FREE = "free" + PREMIUM = "premium" + CUSTOM = "custom" + + def __str__(self) -> str: + return str(self.value) + + +class TokenScope(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_SCOPE = "unknown_scope" + READ_ONLY_METRICS = "read_only_metrics" + WRITE_ONLY_METRICS = "write_only_metrics" + FULL_ACCESS_METRICS_RULES = "full_access_metrics_rules" + READ_ONLY_LOGS = "read_only_logs" + WRITE_ONLY_LOGS = "write_only_logs" + FULL_ACCESS_LOGS_RULES = "full_access_logs_rules" + FULL_ACCESS_ALERT_MANAGER = "full_access_alert_manager" + READ_ONLY_TRACES = "read_only_traces" + WRITE_ONLY_TRACES = "write_only_traces" + + def __str__(self) -> str: + return str(self.value) + + +class UsageUnit(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_UNIT = "unknown_unit" + BYTES = "bytes" + SAMPLES = "samples" + + def __str__(self) -> str: + return str(self.value) + + +@dataclass +class ContactPointEmail: + to: str + + +@dataclass +class ContactPoint: + """ + Contact point. + """ + + region: Region + """ + Region to target. If none is passed will use default region from the config. + """ + + email: Optional[ContactPointEmail] + + +@dataclass +class DataSource: + """ + Data source. + """ + + id: str + """ + ID of the data source. + """ + + project_id: str + """ + ID of the Project the data source belongs to. + """ + + name: str + """ + Data source name. + """ + + url: str + """ + Data source URL. + """ + + type_: DataSourceType + """ + Data source type. + """ + + origin: DataSourceOrigin + """ + Data source origin. + """ + + synchronized_with_grafana: bool + """ + Indicates whether the data source is synchronized with Grafana. + """ + + region: Region + """ + Region of the data source. 
+ """ + + created_at: Optional[datetime] + """ + Date the data source was created. + """ + + updated_at: Optional[datetime] + """ + Date the data source was last updated. + """ + + +@dataclass +class GrafanaProductDashboard: + """ + Grafana dashboard. + """ + + name: str + """ + Dashboard name. + """ + + title: str + """ + Dashboard title. + """ + + url: str + """ + Dashboard URL. + """ + + tags: List[str] + """ + Dashboard tags. + """ + + variables: List[str] + """ + Dashboard variables. + """ + + +@dataclass +class GrafanaUser: + """ + Grafana user. + """ + + id: int + """ + ID of the Grafana user. + """ + + login: str + """ + Username of the Grafana user. + """ + + role: GrafanaUserRole + """ + Role assigned to the Grafana user. + """ + + password: Optional[str] + """ + Grafana user's password. + """ + + +@dataclass +class Plan: + """ + Type of pricing plan. + """ + + name: PlanName + """ + Name of a given pricing plan. + """ + + sample_ingestion_price: int + """ + Ingestion price in cents for 1 million samples. + """ + + logs_ingestion_price: int + """ + Ingestion price in cents for 1 GB of logs. + """ + + traces_ingestion_price: int + """ + Ingestion price in cents for 1 GB of traces. + """ + + monthly_price: int + """ + Retention price in euros per month. + """ + + retention_metrics_interval: Optional[str] + """ + Interval of time during which Scaleway's Cockpit keeps your metrics. + """ + + retention_logs_interval: Optional[str] + """ + Interval of time during which Scaleway's Cockpit keeps your logs. + """ + + retention_traces_interval: Optional[str] + """ + Interval of time during which Scaleway's Cockpit keeps your traces. + """ + + +@dataclass +class Token: + """ + Token. + """ + + id: str + """ + ID of the token. + """ + + project_id: str + """ + ID of the Project the token belongs to. + """ + + name: str + """ + Name of the token. + """ + + scopes: List[TokenScope] + """ + Token permission scopes. + """ + + region: Region + """ + Regions where the token is located. + """ + + created_at: Optional[datetime] + """ + Token creation date. + """ + + updated_at: Optional[datetime] + """ + Token last modification date. + """ + + secret_key: Optional[str] + """ + Token secret key. + """ + + +@dataclass +class Usage: + """ + Data source usage. + """ + + project_id: str + """ + ID of the Project the data source belongs to. + """ + + data_source_origin: DataSourceOrigin + """ + Origin of the data source. + """ + + data_source_type: DataSourceType + """ + Type of the data source. + """ + + unit: UsageUnit + """ + Unit of the data source usage. + """ + + quantity_over_interval: int + """ + Data source usage for the given interval. + """ + + region: Region + """ + Region of the data source usage. + """ + + data_source_id: Optional[str] + """ + ID of the data source. + """ + + interval: Optional[str] + """ + Interval for the data source usage. + """ + + +@dataclass +class AlertManager: + """ + Alert manager information. + """ + + alert_manager_enabled: bool + """ + The Alert manager is enabled. + """ + + managed_alerts_enabled: bool + """ + Managed alerts are enabled. + """ + + region: Region + """ + Regions where the Alert manager is enabled. + """ + + alert_manager_url: Optional[str] + """ + Alert manager URL. + """ + + +@dataclass +class GlobalApiCreateGrafanaUserRequest: + """ + Create a Grafana user. + """ + + login: str + """ + Username of the Grafana user. Note that the `admin` username is not available for creation. 
+ """ + + project_id: Optional[str] + """ + ID of the Project in which to create the Grafana user. + """ + + role: Optional[GrafanaUserRole] + """ + Role assigned to the Grafana user. + """ + + +@dataclass +class GlobalApiDeleteGrafanaUserRequest: + """ + Delete a Grafana user. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + grafana_user_id: int + """ + ID of the Grafana user. + """ + + +@dataclass +class GlobalApiGetCurrentPlanRequest: + """ + Retrieve a pricing plan for the given Project. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class GlobalApiGetGrafanaProductDashboardRequest: + """ + Retrieve a specific dashboard. + """ + + project_id: Optional[str] + """ + ID of the Project the dashboard belongs to. + """ + + dashboard_name: str + """ + Name of the dashboard. + """ + + +@dataclass +class GlobalApiGetGrafanaRequest: + """ + Request a Grafana. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class GlobalApiListGrafanaProductDashboardsRequest: + """ + Retrieve a list of available product dashboards. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Page size. + """ + + tags: Optional[List[str]] + """ + Tags to filter for. + """ + + +@dataclass +class GlobalApiListGrafanaUsersRequest: + """ + List all Grafana users. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Page size. + """ + + order_by: Optional[ListGrafanaUsersRequestOrderBy] + """ + Order of the Grafana users. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + +@dataclass +class GlobalApiListPlansRequest: + """ + Retrieve a list of available pricing plans. + """ + + page: Optional[int] + """ + Page number. + """ + + page_size: Optional[int] + """ + Page size. + """ + + order_by: Optional[ListPlansRequestOrderBy] + + +@dataclass +class GlobalApiResetGrafanaUserPasswordRequest: + """ + Reset a Grafana user's password. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + grafana_user_id: int + """ + ID of the Grafana user. + """ + + +@dataclass +class GlobalApiSelectPlanRequest: + """ + Select a specific pricing plan. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + plan_name: Optional[PlanName] + """ + Name of the pricing plan. + """ + + +@dataclass +class GlobalApiSyncGrafanaDataSourcesRequest: + """ + Trigger the synchronization of all data sources created in the relevant regions. + """ + + project_id: Optional[str] + """ + ID of the Project to target. + """ + + +@dataclass +class Grafana: + """ + Grafana user. + """ + + grafana_url: str + """ + URL to access your Cockpit's Grafana. + """ + + +@dataclass +class ListContactPointsResponse: + """ + Response returned when listing contact points. + """ + + total_count: int + """ + Total count of contact points associated with the default receiver. + """ + + contact_points: List[ContactPoint] + """ + List of contact points associated with the default receiver. + """ + + has_additional_receivers: bool + """ + Indicates whether the Alert manager has other receivers than the default one. + """ + + has_additional_contact_points: bool + """ + Indicates whether there are unmanaged contact points on the default receiver. + """ + + +@dataclass +class ListDataSourcesResponse: + """ + Response returned when listing data sources. 
+ """ + + total_count: int + """ + Total count of data sources matching the request. + """ + + data_sources: List[DataSource] + """ + Data sources matching the request within the pagination. + """ + + +@dataclass +class ListGrafanaProductDashboardsResponse: + """ + Output returned when listing dashboards. + """ + + total_count: int + """ + Total count of Grafana dashboards. + """ + + dashboards: List[GrafanaProductDashboard] + """ + Grafana dashboards information. + """ + + +@dataclass +class ListGrafanaUsersResponse: + """ + Ouptut returned when listing Grafana users. + """ + + total_count: int + """ + Total count of Grafana users. + """ + + grafana_users: List[GrafanaUser] + """ + Grafana users information. + """ + + +@dataclass +class ListPlansResponse: + """ + Output returned when listing pricing plans. + """ + + total_count: int + """ + Total count of available pricing plans. + """ + + plans: List[Plan] + """ + Plan types information. + """ + + +@dataclass +class ListTokensResponse: + """ + Response returned when listing tokens. + """ + + total_count: int + """ + Total count of tokens matching the request. + """ + + tokens: List[Token] + """ + Tokens matching the request within the pagination. + """ + + +@dataclass +class RegionalApiCreateContactPointRequest: + """ + Create a contact point. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project to create the contact point in. + """ + + email: Optional[ContactPointEmail] + + +@dataclass +class RegionalApiCreateDataSourceRequest: + """ + Create a data source. + """ + + name: str + """ + Data source name. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project the data source belongs to. + """ + + type_: Optional[DataSourceType] + """ + Data source type. + """ + + +@dataclass +class RegionalApiCreateTokenRequest: + """ + Create a token. + """ + + name: str + """ + Name of the token. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project the token belongs to. + """ + + token_scopes: Optional[List[TokenScope]] + """ + Token permission scopes. + """ + + +@dataclass +class RegionalApiDeleteContactPointRequest: + """ + Delete a contact point. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project containing the contact point to delete. + """ + + email: Optional[ContactPointEmail] + + +@dataclass +class RegionalApiDeleteDataSourceRequest: + """ + Delete a data source. + """ + + data_source_id: str + """ + ID of the data source to delete. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiDeleteTokenRequest: + """ + Delete a token. + """ + + token_id: str + """ + ID of the token to delete. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiDisableAlertManagerRequest: + """ + Disable the Alert manager. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. 
+ """ + + project_id: Optional[str] + """ + ID of the Project to disable the Alert manager in. + """ + + +@dataclass +class RegionalApiDisableManagedAlertsRequest: + """ + Disable the sending of managed alerts. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class RegionalApiEnableAlertManagerRequest: + """ + Enable the Alert manager. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project to enable the Alert manager in. + """ + + +@dataclass +class RegionalApiEnableManagedAlertsRequest: + """ + Enable the sending of managed alerts. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class RegionalApiGetAlertManagerRequest: + """ + Get the Alert manager. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + Project ID of the requested Alert manager. + """ + + +@dataclass +class RegionalApiGetDataSourceRequest: + """ + Retrieve a data source. + """ + + data_source_id: str + """ + ID of the relevant data source. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiGetTokenRequest: + """ + Get a token. + """ + + token_id: str + """ + Token ID. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class RegionalApiGetUsageOverviewRequest: + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + + interval: Optional[str] + + +@dataclass +class RegionalApiListContactPointsRequest: + """ + List contact points. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] + """ + Page number to return, from the paginated results. + """ + + page_size: Optional[int] + """ + Total count of contact points to return per page. + """ + + project_id: Optional[str] + """ + ID of the Project containing the contact points to list. + """ + + +@dataclass +class RegionalApiListDataSourcesRequest: + """ + List data sources. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] + """ + Page number to return, from the paginated results. + """ + + page_size: Optional[int] + """ + Number of data sources to return per page. + """ + + order_by: Optional[ListDataSourcesRequestOrderBy] + """ + Sort order for data sources in the response. + """ + + project_id: Optional[str] + """ + Project ID to filter for, only data sources from this Project will be returned. + """ + + origin: Optional[DataSourceOrigin] + """ + Origin to filter for, only data sources with matching origin will be returned. + """ + + types: Optional[List[DataSourceType]] + """ + Types to filter for, only data sources with matching types will be returned. + """ + + +@dataclass +class RegionalApiListTokensRequest: + """ + List tokens. 
+ """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] + """ + Page number to return, from the paginated results. + """ + + page_size: Optional[int] + """ + Number of tokens to return per page. + """ + + order_by: Optional[ListTokensRequestOrderBy] + """ + Order in which to return results. + """ + + project_id: Optional[str] + """ + ID of the Project the tokens belong to. + """ + + token_scopes: Optional[List[TokenScope]] + """ + Token scopes to filter for. + """ + + +@dataclass +class RegionalApiTriggerTestAlertRequest: + """ + Request to trigger a test alert. + """ + + region: Optional[Region] + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] + """ + ID of the Project. + """ + + +@dataclass +class UsageOverview: + scaleway_metrics_usage: Optional[Usage] + + scaleway_logs_usage: Optional[Usage] + + external_metrics_usage: Optional[Usage] + + external_logs_usage: Optional[Usage] + + external_traces_usage: Optional[Usage] From 80b3f57a1f8178733f2b3f2e224228772d33b634 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Fri, 19 Apr 2024 16:53:31 +0200 Subject: [PATCH 23/25] feat(rdb): add of private network details (#496) --- scaleway-async/scaleway_async/rdb/v1/__init__.py | 2 ++ scaleway-async/scaleway_async/rdb/v1/marshalling.py | 4 ++++ scaleway-async/scaleway_async/rdb/v1/types.py | 13 +++++++++++++ scaleway/scaleway/rdb/v1/__init__.py | 2 ++ scaleway/scaleway/rdb/v1/marshalling.py | 4 ++++ scaleway/scaleway/rdb/v1/types.py | 13 +++++++++++++ 6 files changed, 38 insertions(+) diff --git a/scaleway-async/scaleway_async/rdb/v1/__init__.py b/scaleway-async/scaleway_async/rdb/v1/__init__.py index f2affddfb..2d37d9c62 100644 --- a/scaleway-async/scaleway_async/rdb/v1/__init__.py +++ b/scaleway-async/scaleway_async/rdb/v1/__init__.py @@ -5,6 +5,7 @@ from .types import ACLRuleProtocol from .types import DatabaseBackupStatus from .content import DATABASE_BACKUP_TRANSIENT_STATUSES +from .types import EndpointPrivateNetworkDetailsProvisioningMode from .types import EngineSettingPropertyType from .types import InstanceLogStatus from .content import INSTANCE_LOG_TRANSIENT_STATUSES @@ -150,6 +151,7 @@ "ACLRuleProtocol", "DatabaseBackupStatus", "DATABASE_BACKUP_TRANSIENT_STATUSES", + "EndpointPrivateNetworkDetailsProvisioningMode", "EngineSettingPropertyType", "InstanceLogStatus", "INSTANCE_LOG_TRANSIENT_STATUSES", diff --git a/scaleway-async/scaleway_async/rdb/v1/marshalling.py b/scaleway-async/scaleway_async/rdb/v1/marshalling.py index a480738f8..4d2440648 100644 --- a/scaleway-async/scaleway_async/rdb/v1/marshalling.py +++ b/scaleway-async/scaleway_async/rdb/v1/marshalling.py @@ -140,6 +140,10 @@ def unmarshal_EndpointPrivateNetworkDetails(data: Any) -> EndpointPrivateNetwork if field is not None: args["zone"] = field + field = data.get("provisioning_mode", None) + if field is not None: + args["provisioning_mode"] = field + return EndpointPrivateNetworkDetails(**args) diff --git a/scaleway-async/scaleway_async/rdb/v1/types.py b/scaleway-async/scaleway_async/rdb/v1/types.py index ef41a5e2f..21fe9974b 100644 --- a/scaleway-async/scaleway_async/rdb/v1/types.py +++ b/scaleway-async/scaleway_async/rdb/v1/types.py @@ -56,6 +56,14 @@ def __str__(self) -> str: return str(self.value) +class EndpointPrivateNetworkDetailsProvisioningMode(str, Enum, metaclass=StrEnumMeta): + STATIC = "static" + IPAM = "ipam" + + def __str__(self) -> str: + return 
str(self.value) + + class EngineSettingPropertyType(str, Enum, metaclass=StrEnumMeta): BOOLEAN = "boolean" INT = "int" @@ -286,6 +294,11 @@ class EndpointPrivateNetworkDetails: Private network zone. """ + provisioning_mode: EndpointPrivateNetworkDetailsProvisioningMode + """ + How endpoint ips are provisioned. + """ + @dataclass class EndpointSpecPrivateNetworkIpamConfig: diff --git a/scaleway/scaleway/rdb/v1/__init__.py b/scaleway/scaleway/rdb/v1/__init__.py index f2affddfb..2d37d9c62 100644 --- a/scaleway/scaleway/rdb/v1/__init__.py +++ b/scaleway/scaleway/rdb/v1/__init__.py @@ -5,6 +5,7 @@ from .types import ACLRuleProtocol from .types import DatabaseBackupStatus from .content import DATABASE_BACKUP_TRANSIENT_STATUSES +from .types import EndpointPrivateNetworkDetailsProvisioningMode from .types import EngineSettingPropertyType from .types import InstanceLogStatus from .content import INSTANCE_LOG_TRANSIENT_STATUSES @@ -150,6 +151,7 @@ "ACLRuleProtocol", "DatabaseBackupStatus", "DATABASE_BACKUP_TRANSIENT_STATUSES", + "EndpointPrivateNetworkDetailsProvisioningMode", "EngineSettingPropertyType", "InstanceLogStatus", "INSTANCE_LOG_TRANSIENT_STATUSES", diff --git a/scaleway/scaleway/rdb/v1/marshalling.py b/scaleway/scaleway/rdb/v1/marshalling.py index a480738f8..4d2440648 100644 --- a/scaleway/scaleway/rdb/v1/marshalling.py +++ b/scaleway/scaleway/rdb/v1/marshalling.py @@ -140,6 +140,10 @@ def unmarshal_EndpointPrivateNetworkDetails(data: Any) -> EndpointPrivateNetwork if field is not None: args["zone"] = field + field = data.get("provisioning_mode", None) + if field is not None: + args["provisioning_mode"] = field + return EndpointPrivateNetworkDetails(**args) diff --git a/scaleway/scaleway/rdb/v1/types.py b/scaleway/scaleway/rdb/v1/types.py index ef41a5e2f..21fe9974b 100644 --- a/scaleway/scaleway/rdb/v1/types.py +++ b/scaleway/scaleway/rdb/v1/types.py @@ -56,6 +56,14 @@ def __str__(self) -> str: return str(self.value) +class EndpointPrivateNetworkDetailsProvisioningMode(str, Enum, metaclass=StrEnumMeta): + STATIC = "static" + IPAM = "ipam" + + def __str__(self) -> str: + return str(self.value) + + class EngineSettingPropertyType(str, Enum, metaclass=StrEnumMeta): BOOLEAN = "boolean" INT = "int" @@ -286,6 +294,11 @@ class EndpointPrivateNetworkDetails: Private network zone. """ + provisioning_mode: EndpointPrivateNetworkDetailsProvisioningMode + """ + How endpoint ips are provisioned. 
+ """ + @dataclass class EndpointSpecPrivateNetworkIpamConfig: From da4950a394ff24e76b7a7a4c358af382a0a389c4 Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Fri, 19 Apr 2024 16:53:54 +0200 Subject: [PATCH 24/25] feat(k8s): add routed_ip_enabled field to Cluster (#497) --- scaleway-async/scaleway_async/k8s/v1/marshalling.py | 4 ++++ scaleway-async/scaleway_async/k8s/v1/types.py | 5 +++++ scaleway/scaleway/k8s/v1/marshalling.py | 4 ++++ scaleway/scaleway/k8s/v1/types.py | 5 +++++ 4 files changed, 18 insertions(+) diff --git a/scaleway-async/scaleway_async/k8s/v1/marshalling.py b/scaleway-async/scaleway_async/k8s/v1/marshalling.py index 337381a2b..9f324c14b 100644 --- a/scaleway-async/scaleway_async/k8s/v1/marshalling.py +++ b/scaleway-async/scaleway_async/k8s/v1/marshalling.py @@ -453,6 +453,10 @@ def unmarshal_Cluster(data: Any) -> Cluster: parser.isoparse(field) if isinstance(field, str) else field ) + field = data.get("routed_ip_enabled", None) + if field is not None: + args["routed_ip_enabled"] = field + return Cluster(**args) diff --git a/scaleway-async/scaleway_async/k8s/v1/types.py b/scaleway-async/scaleway_async/k8s/v1/types.py index c03be2b1f..a242e152f 100644 --- a/scaleway-async/scaleway_async/k8s/v1/types.py +++ b/scaleway-async/scaleway_async/k8s/v1/types.py @@ -857,6 +857,11 @@ class Cluster: Date on which it will be possible to switch to a smaller offer. """ + routed_ip_enabled: Optional[bool] + """ + Defines whether routed IPs are enabled for nodes of this cluster. + """ + @dataclass class Node: diff --git a/scaleway/scaleway/k8s/v1/marshalling.py b/scaleway/scaleway/k8s/v1/marshalling.py index 337381a2b..9f324c14b 100644 --- a/scaleway/scaleway/k8s/v1/marshalling.py +++ b/scaleway/scaleway/k8s/v1/marshalling.py @@ -453,6 +453,10 @@ def unmarshal_Cluster(data: Any) -> Cluster: parser.isoparse(field) if isinstance(field, str) else field ) + field = data.get("routed_ip_enabled", None) + if field is not None: + args["routed_ip_enabled"] = field + return Cluster(**args) diff --git a/scaleway/scaleway/k8s/v1/types.py b/scaleway/scaleway/k8s/v1/types.py index c03be2b1f..a242e152f 100644 --- a/scaleway/scaleway/k8s/v1/types.py +++ b/scaleway/scaleway/k8s/v1/types.py @@ -857,6 +857,11 @@ class Cluster: Date on which it will be possible to switch to a smaller offer. """ + routed_ip_enabled: Optional[bool] + """ + Defines whether routed IPs are enabled for nodes of this cluster. 
+ """ + @dataclass class Node: From 51152d9f5e9b81e649f958d178ac0871a045d15e Mon Sep 17 00:00:00 2001 From: Scaleway Bot Date: Fri, 19 Apr 2024 16:55:13 +0200 Subject: [PATCH 25/25] fix: adjust unmarshalling for protected field name and set none value for not optional field (#498) --- .../scaleway_async/account/v2/marshalling.py | 4 + .../scaleway_async/account/v3/marshalling.py | 4 + .../applesilicon/v1alpha1/marshalling.py | 20 +- .../baremetal/v1/marshalling.py | 54 ++++- .../billing/v2alpha1/marshalling.py | 26 +- .../billing/v2beta1/marshalling.py | 44 +++- .../block/v1alpha1/marshalling.py | 38 ++- .../scaleway_async/cockpit/v1/marshalling.py | 38 ++- .../cockpit/v1beta1/marshalling.py | 28 ++- .../container/v1beta1/marshalling.py | 36 +++ .../scaleway_async/dedibox/v1/marshalling.py | 226 +++++++++++++++++- .../document_db/v1beta1/marshalling.py | 84 ++++++- .../domain/v2beta1/marshalling.py | 148 +++++++++++- .../flexibleip/v1alpha1/marshalling.py | 12 + .../function/v1beta1/marshalling.py | 38 +++ .../iam/v1alpha1/marshalling.py | 70 +++++- .../scaleway_async/instance/v1/marshalling.py | 184 +++++++++++++- .../scaleway_async/iot/v1/marshalling.py | 58 ++++- .../jobs/v1alpha1/marshalling.py | 20 ++ .../scaleway_async/k8s/v1/marshalling.py | 48 +++- .../scaleway_async/lb/v1/marshalling.py | 154 +++++++++++- .../llm_inference/v1beta1/marshalling.py | 24 ++ .../marketplace/v2/marshalling.py | 14 +- .../scaleway_async/mnq/v1beta1/marshalling.py | 42 ++++ .../scaleway_async/rdb/v1/marshalling.py | 96 +++++++- .../scaleway_async/redis/v1/marshalling.py | 24 +- .../scaleway_async/registry/v1/marshalling.py | 14 ++ .../secret/v1alpha1/marshalling.py | 30 ++- .../secret/v1beta1/marshalling.py | 40 +++- .../serverless_sqldb/v1alpha1/marshalling.py | 12 + .../tem/v1alpha1/marshalling.py | 52 ++++ .../scaleway_async/test/v1/marshalling.py | 4 + .../scaleway_async/vpc/v1/marshalling.py | 4 + .../scaleway_async/vpc/v2/marshalling.py | 12 + .../scaleway_async/vpcgw/v1/marshalling.py | 54 ++++- .../webhosting/v1alpha1/marshalling.py | 16 +- scaleway/scaleway/account/v2/marshalling.py | 4 + scaleway/scaleway/account/v3/marshalling.py | 4 + .../applesilicon/v1alpha1/marshalling.py | 20 +- scaleway/scaleway/baremetal/v1/marshalling.py | 54 ++++- .../scaleway/billing/v2alpha1/marshalling.py | 26 +- .../scaleway/billing/v2beta1/marshalling.py | 44 +++- .../scaleway/block/v1alpha1/marshalling.py | 38 ++- scaleway/scaleway/cockpit/v1/marshalling.py | 38 ++- .../scaleway/cockpit/v1beta1/marshalling.py | 28 ++- .../scaleway/container/v1beta1/marshalling.py | 36 +++ scaleway/scaleway/dedibox/v1/marshalling.py | 226 +++++++++++++++++- .../document_db/v1beta1/marshalling.py | 84 ++++++- .../scaleway/domain/v2beta1/marshalling.py | 148 +++++++++++- .../flexibleip/v1alpha1/marshalling.py | 12 + .../scaleway/function/v1beta1/marshalling.py | 38 +++ scaleway/scaleway/iam/v1alpha1/marshalling.py | 70 +++++- scaleway/scaleway/instance/v1/marshalling.py | 184 +++++++++++++- scaleway/scaleway/iot/v1/marshalling.py | 58 ++++- .../scaleway/jobs/v1alpha1/marshalling.py | 20 ++ scaleway/scaleway/k8s/v1/marshalling.py | 48 +++- scaleway/scaleway/lb/v1/marshalling.py | 154 +++++++++++- .../llm_inference/v1beta1/marshalling.py | 24 ++ .../scaleway/marketplace/v2/marshalling.py | 14 +- scaleway/scaleway/mnq/v1beta1/marshalling.py | 42 ++++ scaleway/scaleway/rdb/v1/marshalling.py | 96 +++++++- scaleway/scaleway/redis/v1/marshalling.py | 24 +- scaleway/scaleway/registry/v1/marshalling.py | 14 ++ 
.../scaleway/secret/v1alpha1/marshalling.py | 30 ++- .../scaleway/secret/v1beta1/marshalling.py | 40 +++- .../serverless_sqldb/v1alpha1/marshalling.py | 12 + scaleway/scaleway/tem/v1alpha1/marshalling.py | 52 ++++ scaleway/scaleway/test/v1/marshalling.py | 4 + scaleway/scaleway/vpc/v1/marshalling.py | 4 + scaleway/scaleway/vpc/v2/marshalling.py | 12 + scaleway/scaleway/vpcgw/v1/marshalling.py | 54 ++++- .../webhosting/v1alpha1/marshalling.py | 16 +- 72 files changed, 3414 insertions(+), 130 deletions(-) diff --git a/scaleway-async/scaleway_async/account/v2/marshalling.py b/scaleway-async/scaleway_async/account/v2/marshalling.py index 869668e9d..c577c2d2d 100644 --- a/scaleway-async/scaleway_async/account/v2/marshalling.py +++ b/scaleway-async/scaleway_async/account/v2/marshalling.py @@ -40,10 +40,14 @@ def unmarshal_Project(data: Any) -> Project: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Project(**args) diff --git a/scaleway-async/scaleway_async/account/v3/marshalling.py b/scaleway-async/scaleway_async/account/v3/marshalling.py index 2725094df..adf14cd3a 100644 --- a/scaleway-async/scaleway_async/account/v3/marshalling.py +++ b/scaleway-async/scaleway_async/account/v3/marshalling.py @@ -40,10 +40,14 @@ def unmarshal_Project(data: Any) -> Project: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Project(**args) diff --git a/scaleway-async/scaleway_async/applesilicon/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/applesilicon/v1alpha1/marshalling.py index 4524d1c52..9d92c06d2 100644 --- a/scaleway-async/scaleway_async/applesilicon/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/applesilicon/v1alpha1/marshalling.py @@ -82,7 +82,7 @@ def unmarshal_ServerTypeDisk(data: Any) -> ServerTypeDisk: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -101,7 +101,7 @@ def unmarshal_ServerTypeMemory(data: Any) -> ServerTypeMemory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -127,18 +127,26 @@ def unmarshal_ServerType(data: Any) -> ServerType: field = data.get("cpu", None) if field is not None: args["cpu"] = unmarshal_ServerTypeCPU(field) + else: + args["cpu"] = None field = data.get("disk", None) if field is not None: args["disk"] = unmarshal_ServerTypeDisk(field) + else: + args["disk"] = None field = data.get("memory", None) if field is not None: args["memory"] = unmarshal_ServerTypeMemory(field) + else: + args["memory"] = None field = data.get("minimum_lease_duration", None) if field is not None: args["minimum_lease_duration"] = field + else: + args["minimum_lease_duration"] = None return ServerType(**args) @@ -155,7 +163,7 @@ def unmarshal_Server(data: Any) -> Server: if field is not None: args["id"] = field - field = data.get("type_", None) + 
field = data.get("type", None) if field is not None: args["type_"] = field @@ -190,16 +198,22 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("deletable_at", None) if field is not None: args["deletable_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["deletable_at"] = None return Server(**args) diff --git a/scaleway-async/scaleway_async/baremetal/v1/marshalling.py b/scaleway-async/scaleway_async/baremetal/v1/marshalling.py index 60471d4da..dc637d6ca 100644 --- a/scaleway-async/scaleway_async/baremetal/v1/marshalling.py +++ b/scaleway-async/scaleway_async/baremetal/v1/marshalling.py @@ -111,6 +111,8 @@ def unmarshal_OSOSField(data: Any) -> OSOSField: field = data.get("default_value", None) if field is not None: args["default_value"] = field + else: + args["default_value"] = None return OSOSField(**args) @@ -154,22 +156,32 @@ def unmarshal_OS(data: Any) -> OS: field = data.get("ssh", None) if field is not None: args["ssh"] = unmarshal_OSOSField(field) + else: + args["ssh"] = None field = data.get("user", None) if field is not None: args["user"] = unmarshal_OSOSField(field) + else: + args["user"] = None field = data.get("password", None) if field is not None: args["password"] = unmarshal_OSOSField(field) + else: + args["password"] = None field = data.get("service_user", None) if field is not None: args["service_user"] = unmarshal_OSOSField(field) + else: + args["service_user"] = None field = data.get("service_password", None) if field is not None: args["service_password"] = unmarshal_OSOSField(field) + else: + args["service_password"] = None return OS(**args) @@ -217,7 +229,7 @@ def unmarshal_Disk(data: Any) -> Disk: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -236,7 +248,7 @@ def unmarshal_Memory(data: Any) -> Memory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -282,10 +294,14 @@ def unmarshal_OfferOptionOffer(data: Any) -> OfferOptionOffer: field = data.get("price", None) if field is not None: args["price"] = unmarshal_Money(field) + else: + args["price"] = None field = data.get("os_id", None) if field is not None: args["os_id"] = field + else: + args["os_id"] = None return OfferOptionOffer(**args) @@ -302,7 +318,7 @@ def unmarshal_PersistentMemory(data: Any) -> PersistentMemory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -373,10 +389,14 @@ def unmarshal_Offer(data: Any) -> Offer: field = data.get("price_per_hour", None) if field is not None: args["price_per_hour"] = unmarshal_Money(field) + else: + args["price_per_hour"] = None field = data.get("price_per_month", None) if field is not None: args["price_per_month"] = unmarshal_Money(field) + else: + args["price_per_month"] = None field = data.get("cpus", None) if field is not None: @@ -441,6 +461,8 @@ def unmarshal_Offer(data: Any) -> Offer: field = data.get("fee", None) if field is not None: 
args["fee"] = unmarshal_Money(field) + else: + args["fee"] = None return Offer(**args) @@ -499,14 +521,20 @@ def unmarshal_ServerPrivateNetwork(data: Any) -> ServerPrivateNetwork: field = data.get("vlan", None) if field is not None: args["vlan"] = field + else: + args["vlan"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return ServerPrivateNetwork(**args) @@ -577,6 +605,8 @@ def unmarshal_ServerOption(data: Any) -> ServerOption: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return ServerOption(**args) @@ -635,10 +665,14 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("offer_id", None) if field is not None: @@ -681,10 +715,14 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("install", None) if field is not None: args["install"] = unmarshal_ServerInstall(field) + else: + args["install"] = None field = data.get("rescue_server", None) if field is not None: args["rescue_server"] = unmarshal_ServerRescueServer(field) + else: + args["rescue_server"] = None return Server(**args) @@ -701,7 +739,7 @@ def unmarshal_Setting(data: Any) -> Setting: if field is not None: args["id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -739,6 +777,8 @@ def unmarshal_BMCAccess(data: Any) -> BMCAccess: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return BMCAccess(**args) @@ -754,6 +794,8 @@ def unmarshal_GetServerMetricsResponse(data: Any) -> GetServerMetricsResponse: field = data.get("pings", None) if field is not None: args["pings"] = unmarshal_TimeSeries(field) + else: + args["pings"] = None return GetServerMetricsResponse(**args) @@ -838,10 +880,14 @@ def unmarshal_ServerEvent(data: Any) -> ServerEvent: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return ServerEvent(**args) diff --git a/scaleway-async/scaleway_async/billing/v2alpha1/marshalling.py b/scaleway-async/scaleway_async/billing/v2alpha1/marshalling.py index 351e16f24..3fe90b663 100644 --- a/scaleway-async/scaleway_async/billing/v2alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/billing/v2alpha1/marshalling.py @@ -48,6 +48,8 @@ def unmarshal_GetConsumptionResponseConsumption( field = data.get("value", None) if field is not None: args["value"] = unmarshal_Money(field) + else: + args["value"] = None return 
GetConsumptionResponseConsumption(**args) @@ -71,6 +73,8 @@ def unmarshal_GetConsumptionResponse(data: Any) -> GetConsumptionResponse: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return GetConsumptionResponse(**args) @@ -86,6 +90,8 @@ def unmarshal_DiscountCoupon(data: Any) -> DiscountCoupon: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return DiscountCoupon(**args) @@ -98,7 +104,7 @@ def unmarshal_DiscountFilter(data: Any) -> DiscountFilter: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -156,18 +162,26 @@ def unmarshal_Discount(data: Any) -> Discount: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("stop_date", None) if field is not None: args["stop_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stop_date"] = None field = data.get("coupon", None) if field is not None: args["coupon"] = unmarshal_DiscountCoupon(field) + else: + args["coupon"] = None return Discount(**args) @@ -216,24 +230,34 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("issued_date", None) if field is not None: args["issued_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["issued_date"] = None field = data.get("due_date", None) if field is not None: args["due_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["due_date"] = None field = data.get("total_untaxed", None) if field is not None: args["total_untaxed"] = unmarshal_Money(field) + else: + args["total_untaxed"] = None field = data.get("total_taxed", None) if field is not None: args["total_taxed"] = unmarshal_Money(field) + else: + args["total_taxed"] = None return Invoice(**args) diff --git a/scaleway-async/scaleway_async/billing/v2beta1/marshalling.py b/scaleway-async/scaleway_async/billing/v2beta1/marshalling.py index 48328d555..69debc7a9 100644 --- a/scaleway-async/scaleway_async/billing/v2beta1/marshalling.py +++ b/scaleway-async/scaleway_async/billing/v2beta1/marshalling.py @@ -44,28 +44,38 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("stop_date", None) if field is not None: args["stop_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stop_date"] = None field = data.get("billing_period", None) if field is not None: args["billing_period"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["billing_period"] = None field = data.get("issued_date", None) if field is not None: args["issued_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["issued_date"] = None field = data.get("due_date", None) if field is not 
None: args["due_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["due_date"] = None - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -84,22 +94,32 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("total_untaxed", None) if field is not None: args["total_untaxed"] = unmarshal_Money(field) + else: + args["total_untaxed"] = None field = data.get("total_taxed", None) if field is not None: args["total_taxed"] = unmarshal_Money(field) + else: + args["total_taxed"] = None field = data.get("total_tax", None) if field is not None: args["total_tax"] = unmarshal_Money(field) + else: + args["total_tax"] = None field = data.get("total_discount", None) if field is not None: args["total_discount"] = unmarshal_Money(field) + else: + args["total_discount"] = None field = data.get("total_undiscount", None) if field is not None: args["total_undiscount"] = unmarshal_Money(field) + else: + args["total_undiscount"] = None return Invoice(**args) @@ -145,6 +165,8 @@ def unmarshal_ListConsumptionsResponseConsumption( field = data.get("value", None) if field is not None: args["value"] = unmarshal_Money(field) + else: + args["value"] = None return ListConsumptionsResponseConsumption(**args) @@ -176,6 +198,8 @@ def unmarshal_ListConsumptionsResponse(data: Any) -> ListConsumptionsResponse: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return ListConsumptionsResponse(**args) @@ -191,6 +215,8 @@ def unmarshal_DiscountCoupon(data: Any) -> DiscountCoupon: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return DiscountCoupon(**args) @@ -203,7 +229,7 @@ def unmarshal_DiscountFilter(data: Any) -> DiscountFilter: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -261,18 +287,26 @@ def unmarshal_Discount(data: Any) -> Discount: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("stop_date", None) if field is not None: args["stop_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stop_date"] = None field = data.get("coupon", None) if field is not None: args["coupon"] = unmarshal_DiscountCoupon(field) + else: + args["coupon"] = None return Discount(**args) @@ -338,10 +372,14 @@ def unmarshal_ListTaxesResponseTax(data: Any) -> ListTaxesResponseTax: field = data.get("rate", None) if field is not None: args["rate"] = field + else: + args["rate"] = None field = data.get("total_tax_value", None) if field is not None: args["total_tax_value"] = field + else: + args["total_tax_value"] = None return ListTaxesResponseTax(**args) @@ -369,5 +407,7 @@ def unmarshal_ListTaxesResponse(data: Any) -> ListTaxesResponse: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return ListTaxesResponse(**args) diff --git a/scaleway-async/scaleway_async/block/v1alpha1/marshalling.py 
b/scaleway-async/scaleway_async/block/v1alpha1/marshalling.py index 4381f8215..b7f7e70b6 100644 --- a/scaleway-async/scaleway_async/block/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/block/v1alpha1/marshalling.py @@ -51,7 +51,7 @@ def unmarshal_Reference(data: Any) -> Reference: if field is not None: args["product_resource_id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -62,6 +62,8 @@ def unmarshal_Reference(data: Any) -> Reference: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Reference(**args) @@ -82,7 +84,7 @@ def unmarshal_SnapshotParentVolume(data: Any) -> SnapshotParentVolume: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -135,21 +137,27 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: if field is not None: args["zone"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field field = data.get("parent_volume", None) if field is not None: args["parent_volume"] = unmarshal_SnapshotParentVolume(field) + else: + args["parent_volume"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Snapshot(**args) @@ -162,13 +170,15 @@ def unmarshal_VolumeSpecifications(data: Any) -> VolumeSpecifications: args: Dict[str, Any] = {} - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field field = data.get("perf_iops", None) if field is not None: args["perf_iops"] = field + else: + args["perf_iops"] = None return VolumeSpecifications(**args) @@ -189,7 +199,7 @@ def unmarshal_Volume(data: Any) -> Volume: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -210,14 +220,20 @@ def unmarshal_Volume(data: Any) -> Volume: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("parent_snapshot_id", None) if field is not None: args["parent_snapshot_id"] = field + else: + args["parent_snapshot_id"] = None field = data.get("status", None) if field is not None: @@ -234,12 +250,16 @@ def unmarshal_Volume(data: Any) -> Volume: field = data.get("specs", None) if field is not None: args["specs"] = unmarshal_VolumeSpecifications(field) + else: + args["specs"] = None field = data.get("last_detached_at", None) if field is not None: args["last_detached_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_detached_at"] = None return Volume(**args) @@ -273,21 +293,27 @@ def unmarshal_VolumeType(data: Any) -> VolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) 
if field is not None: args["type_"] = field field = data.get("pricing", None) if field is not None: args["pricing"] = unmarshal_Money(field) + else: + args["pricing"] = None field = data.get("snapshot_pricing", None) if field is not None: args["snapshot_pricing"] = unmarshal_Money(field) + else: + args["snapshot_pricing"] = None field = data.get("specs", None) if field is not None: args["specs"] = unmarshal_VolumeSpecifications(field) + else: + args["specs"] = None return VolumeType(**args) diff --git a/scaleway-async/scaleway_async/cockpit/v1/marshalling.py b/scaleway-async/scaleway_async/cockpit/v1/marshalling.py index 12bc2dd4b..ac633628f 100644 --- a/scaleway-async/scaleway_async/cockpit/v1/marshalling.py +++ b/scaleway-async/scaleway_async/cockpit/v1/marshalling.py @@ -74,6 +74,8 @@ def unmarshal_ContactPoint(data: Any) -> ContactPoint: field = data.get("email", None) if field is not None: args["email"] = unmarshal_ContactPointEmail(field) + else: + args["email"] = None return ContactPoint(**args) @@ -102,7 +104,7 @@ def unmarshal_DataSource(data: Any) -> DataSource: if field is not None: args["url"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -121,10 +123,14 @@ def unmarshal_DataSource(data: Any) -> DataSource: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return DataSource(**args) @@ -183,6 +189,8 @@ def unmarshal_GrafanaUser(data: Any) -> GrafanaUser: field = data.get("password", None) if field is not None: args["password"] = field + else: + args["password"] = None return GrafanaUser(**args) @@ -218,14 +226,20 @@ def unmarshal_Plan(data: Any) -> Plan: field = data.get("retention_metrics_interval", None) if field is not None: args["retention_metrics_interval"] = field + else: + args["retention_metrics_interval"] = None field = data.get("retention_logs_interval", None) if field is not None: args["retention_logs_interval"] = field + else: + args["retention_logs_interval"] = None field = data.get("retention_traces_interval", None) if field is not None: args["retention_traces_interval"] = field + else: + args["retention_traces_interval"] = None return Plan(**args) @@ -261,14 +275,20 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("secret_key", None) if field is not None: args["secret_key"] = field + else: + args["secret_key"] = None return Token(**args) @@ -296,6 +316,8 @@ def unmarshal_AlertManager(data: Any) -> AlertManager: field = data.get("alert_manager_url", None) if field is not None: args["alert_manager_url"] = field + else: + args["alert_manager_url"] = None return AlertManager(**args) @@ -488,10 +510,14 @@ def unmarshal_Usage(data: Any) -> Usage: field = data.get("data_source_id", None) if field is not None: args["data_source_id"] = field + else: + args["data_source_id"] = None field = data.get("interval", None) if field is not None: 
args["interval"] = field + else: + args["interval"] = None return Usage(**args) @@ -507,22 +533,32 @@ def unmarshal_UsageOverview(data: Any) -> UsageOverview: field = data.get("scaleway_metrics_usage", None) if field is not None: args["scaleway_metrics_usage"] = unmarshal_Usage(field) + else: + args["scaleway_metrics_usage"] = None field = data.get("scaleway_logs_usage", None) if field is not None: args["scaleway_logs_usage"] = unmarshal_Usage(field) + else: + args["scaleway_logs_usage"] = None field = data.get("external_metrics_usage", None) if field is not None: args["external_metrics_usage"] = unmarshal_Usage(field) + else: + args["external_metrics_usage"] = None field = data.get("external_logs_usage", None) if field is not None: args["external_logs_usage"] = unmarshal_Usage(field) + else: + args["external_logs_usage"] = None field = data.get("external_traces_usage", None) if field is not None: args["external_traces_usage"] = unmarshal_Usage(field) + else: + args["external_traces_usage"] = None return UsageOverview(**args) diff --git a/scaleway-async/scaleway_async/cockpit/v1beta1/marshalling.py b/scaleway-async/scaleway_async/cockpit/v1beta1/marshalling.py index edf291af9..47df80c4d 100644 --- a/scaleway-async/scaleway_async/cockpit/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/cockpit/v1beta1/marshalling.py @@ -73,6 +73,8 @@ def unmarshal_ContactPoint(data: Any) -> ContactPoint: field = data.get("email", None) if field is not None: args["email"] = unmarshal_ContactPointEmail(field) + else: + args["email"] = None return ContactPoint(**args) @@ -101,7 +103,7 @@ def unmarshal_Datasource(data: Any) -> Datasource: if field is not None: args["url"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -166,6 +168,8 @@ def unmarshal_GrafanaUser(data: Any) -> GrafanaUser: field = data.get("password", None) if field is not None: args["password"] = field + else: + args["password"] = None return GrafanaUser(**args) @@ -240,18 +244,26 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("scopes", None) if field is not None: args["scopes"] = unmarshal_TokenScopes(field) + else: + args["scopes"] = None field = data.get("secret_key", None) if field is not None: args["secret_key"] = field + else: + args["secret_key"] = None return Token(**args) @@ -322,14 +334,20 @@ def unmarshal_Plan(data: Any) -> Plan: field = data.get("retention_metrics_interval", None) if field is not None: args["retention_metrics_interval"] = field + else: + args["retention_metrics_interval"] = None field = data.get("retention_logs_interval", None) if field is not None: args["retention_logs_interval"] = field + else: + args["retention_logs_interval"] = None field = data.get("retention_traces_interval", None) if field is not None: args["retention_traces_interval"] = field + else: + args["retention_traces_interval"] = None return Plan(**args) @@ -357,18 +375,26 @@ def unmarshal_Cockpit(data: Any) -> Cockpit: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = 
data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("endpoints", None) if field is not None: args["endpoints"] = unmarshal_CockpitEndpoints(field) + else: + args["endpoints"] = None field = data.get("plan", None) if field is not None: args["plan"] = unmarshal_Plan(field) + else: + args["plan"] = None return Cockpit(**args) diff --git a/scaleway-async/scaleway_async/container/v1beta1/marshalling.py b/scaleway-async/scaleway_async/container/v1beta1/marshalling.py index 224844ea6..98c072c3e 100644 --- a/scaleway-async/scaleway_async/container/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/container/v1beta1/marshalling.py @@ -122,14 +122,20 @@ def unmarshal_Container(data: Any) -> Container: field = data.get("timeout", None) if field is not None: args["timeout"] = field + else: + args["timeout"] = None field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("domain_name", None) if field is not None: @@ -193,6 +199,8 @@ def unmarshal_Cron(data: Any) -> Cron: field = data.get("args", None) if field is not None: args["args"] = field + else: + args["args"] = None return Cron(**args) @@ -228,6 +236,8 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None return Domain(**args) @@ -287,10 +297,14 @@ def unmarshal_Namespace(data: Any) -> Namespace: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return Namespace(**args) @@ -318,22 +332,32 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("container_id", None) if field is not None: args["container_id"] = field + else: + args["container_id"] = None field = data.get("namespace_id", None) if field is not None: args["namespace_id"] = field + else: + args["namespace_id"] = None field = data.get("public_key", None) if field is not None: args["public_key"] = field + else: + args["public_key"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return Token(**args) @@ -365,6 +389,8 @@ def unmarshal_TriggerMnqNatsClientConfig(data: Any) -> TriggerMnqNatsClientConfi field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + else: + args["mnq_credential_id"] = None return TriggerMnqNatsClientConfig(**args) @@ -392,6 +418,8 @@ def unmarshal_TriggerMnqSqsClientConfig(data: Any) -> TriggerMnqSqsClientConfig: field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + else: + args["mnq_credential_id"] = None return TriggerMnqSqsClientConfig(**args) @@ -458,18 +486,26 @@ def unmarshal_Trigger(data: Any) -> Trigger: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None 
field = data.get("scw_sqs_config", None) if field is not None: args["scw_sqs_config"] = unmarshal_TriggerMnqSqsClientConfig(field) + else: + args["scw_sqs_config"] = None field = data.get("scw_nats_config", None) if field is not None: args["scw_nats_config"] = unmarshal_TriggerMnqNatsClientConfig(field) + else: + args["scw_nats_config"] = None field = data.get("sqs_config", None) if field is not None: args["sqs_config"] = unmarshal_TriggerSqsClientConfig(field) + else: + args["sqs_config"] = None return Trigger(**args) diff --git a/scaleway-async/scaleway_async/dedibox/v1/marshalling.py b/scaleway-async/scaleway_async/dedibox/v1/marshalling.py index 4452a9a73..8bdd9b9b6 100644 --- a/scaleway-async/scaleway_async/dedibox/v1/marshalling.py +++ b/scaleway-async/scaleway_async/dedibox/v1/marshalling.py @@ -222,7 +222,7 @@ def unmarshal_Disk(data: Any) -> Disk: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -241,7 +241,7 @@ def unmarshal_Memory(data: Any) -> Memory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -306,7 +306,7 @@ def unmarshal_OfferAntiDosInfo(data: Any) -> OfferAntiDosInfo: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -354,6 +354,8 @@ def unmarshal_OfferFailoverBlockInfo(data: Any) -> OfferFailoverBlockInfo: field = data.get("onetime_fees", None) if field is not None: args["onetime_fees"] = unmarshal_Offer(field) + else: + args["onetime_fees"] = None return OfferFailoverBlockInfo(**args) @@ -369,6 +371,8 @@ def unmarshal_OfferFailoverIpInfo(data: Any) -> OfferFailoverIpInfo: field = data.get("onetime_fees", None) if field is not None: args["onetime_fees"] = unmarshal_Offer(field) + else: + args["onetime_fees"] = None return OfferFailoverIpInfo(**args) @@ -497,10 +501,14 @@ def unmarshal_OfferServerInfo(data: Any) -> OfferServerInfo: field = data.get("rpn_version", None) if field is not None: args["rpn_version"] = field + else: + args["rpn_version"] = None field = data.get("onetime_fees", None) if field is not None: args["onetime_fees"] = unmarshal_Offer(field) + else: + args["onetime_fees"] = None return OfferServerInfo(**args) @@ -608,54 +616,80 @@ def unmarshal_Offer(data: Any) -> Offer: field = data.get("pricing", None) if field is not None: args["pricing"] = unmarshal_Money(field) + else: + args["pricing"] = None field = data.get("server_info", None) if field is not None: args["server_info"] = unmarshal_OfferServerInfo(field) + else: + args["server_info"] = None field = data.get("service_level_info", None) if field is not None: args["service_level_info"] = unmarshal_OfferServiceLevelInfo(field) + else: + args["service_level_info"] = None field = data.get("rpn_info", None) if field is not None: args["rpn_info"] = unmarshal_OfferRPNInfo(field) + else: + args["rpn_info"] = None field = data.get("san_info", None) if field is not None: args["san_info"] = unmarshal_OfferSANInfo(field) + else: + args["san_info"] = None field = data.get("antidos_info", None) if field is not None: args["antidos_info"] = unmarshal_OfferAntiDosInfo(field) + else: + args["antidos_info"] = None field = data.get("backup_info", None) if field is not None: args["backup_info"] = unmarshal_OfferBackupInfo(field) + else: + args["backup_info"] = None field = data.get("usb_storage_info", None) 
if field is not None: args["usb_storage_info"] = unmarshal_OfferStorageInfo(field) + else: + args["usb_storage_info"] = None field = data.get("storage_info", None) if field is not None: args["storage_info"] = unmarshal_OfferStorageInfo(field) + else: + args["storage_info"] = None field = data.get("license_info", None) if field is not None: args["license_info"] = unmarshal_OfferLicenseInfo(field) + else: + args["license_info"] = None field = data.get("failover_ip_info", None) if field is not None: args["failover_ip_info"] = unmarshal_OfferFailoverIpInfo(field) + else: + args["failover_ip_info"] = None field = data.get("failover_block_info", None) if field is not None: args["failover_block_info"] = unmarshal_OfferFailoverBlockInfo(field) + else: + args["failover_block_info"] = None field = data.get("bandwidth_info", None) if field is not None: args["bandwidth_info"] = unmarshal_OfferBandwidthInfo(field) + else: + args["bandwidth_info"] = None return Offer(**args) @@ -676,7 +710,7 @@ def unmarshal_OS(data: Any) -> OS: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -739,24 +773,34 @@ def unmarshal_OS(data: Any) -> OS: field = data.get("max_partitions", None) if field is not None: args["max_partitions"] = field + else: + args["max_partitions"] = None field = data.get("panel_password_regex", None) if field is not None: args["panel_password_regex"] = field + else: + args["panel_password_regex"] = None field = data.get("requires_valid_hostname", None) if field is not None: args["requires_valid_hostname"] = field + else: + args["requires_valid_hostname"] = None field = data.get("hostname_regex", None) if field is not None: args["hostname_regex"] = field + else: + args["hostname_regex"] = None field = data.get("released_at", None) if field is not None: args["released_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["released_at"] = None return OS(**args) @@ -800,6 +844,8 @@ def unmarshal_RpnSan(data: Any) -> RpnSan: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("offer_name", None) if field is not None: @@ -828,22 +874,30 @@ def unmarshal_RpnSan(data: Any) -> RpnSan: field = data.get("offer", None) if field is not None: args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("delivered_at", None) if field is not None: args["delivered_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["delivered_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return RpnSan(**args) @@ -864,7 +918,7 @@ def unmarshal_RpnGroup(data: Any) -> RpnGroup: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -891,6 +945,8 @@ def unmarshal_RpnGroup(data: Any) -> RpnGroup: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return 
RpnGroup(**args) @@ -915,7 +971,7 @@ def unmarshal_NetworkInterface(data: Any) -> NetworkInterface: if field is not None: args["mac"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -966,18 +1022,26 @@ def unmarshal_ServerOption(data: Any) -> ServerOption: field = data.get("offer", None) if field is not None: args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None return ServerOption(**args) @@ -1030,6 +1094,8 @@ def unmarshal_Server(data: Any) -> Server: args["rebooted_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["rebooted_at"] = None field = data.get("status", None) if field is not None: @@ -1084,34 +1150,50 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("offer", None) if field is not None: args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("location", None) if field is not None: args["location"] = unmarshal_ServerLocation(field) + else: + args["location"] = None field = data.get("os", None) if field is not None: args["os"] = unmarshal_OS(field) + else: + args["os"] = None field = data.get("level", None) if field is not None: args["level"] = unmarshal_ServiceLevel(field) + else: + args["level"] = None field = data.get("rescue_os", None) if field is not None: args["rescue_os"] = unmarshal_OS(field) + else: + args["rescue_os"] = None return Server(**args) @@ -1163,7 +1245,7 @@ def unmarshal_RpnV2Group(data: Any) -> RpnV2Group: if field is not None: args["project_id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1186,10 +1268,14 @@ def unmarshal_RpnV2Group(data: Any) -> RpnV2Group: field = data.get("subnet", None) if field is not None: args["subnet"] = unmarshal_RpnV2GroupSubnet(field) + else: + args["subnet"] = None field = data.get("rpnv1_group", None) if field is not None: args["rpnv1_group"] = unmarshal_RpnGroup(field) + else: + args["rpnv1_group"] = None return RpnV2Group(**args) @@ -1210,37 +1296,49 @@ def unmarshal_Service(data: Any) -> Service: if field is not None: args["provisioning_status"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("resource_id", None) if field is not None: args["resource_id"] = field + else: + args["resource_id"] = None field = data.get("offer", None) if field is not None: 
args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("delivered_at", None) if field is not None: args["delivered_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["delivered_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return Service(**args) @@ -1324,25 +1422,33 @@ def unmarshal_FailoverIP(data: Any) -> FailoverIP: if field is not None: args["status"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("mac", None) if field is not None: args["mac"] = field + else: + args["mac"] = None field = data.get("server_id", None) if field is not None: args["server_id"] = field + else: + args["server_id"] = None field = data.get("block", None) if field is not None: args["block"] = unmarshal_FailoverBlock(field) + else: + args["block"] = None field = data.get("server_zone", None) if field is not None: args["server_zone"] = field + else: + args["server_zone"] = None return FailoverIP(**args) @@ -1374,6 +1480,8 @@ def unmarshal_BMCAccess(data: Any) -> BMCAccess: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return BMCAccess(**args) @@ -1468,6 +1576,8 @@ def unmarshal_CanOrderResponse(data: Any) -> CanOrderResponse: field = data.get("message", None) if field is not None: args["message"] = field + else: + args["message"] = None return CanOrderResponse(**args) @@ -1579,6 +1689,8 @@ def unmarshal_GetRpnStatusResponse(data: Any) -> GetRpnStatusResponse: field = data.get("operations_left", None) if field is not None: args["operations_left"] = field + else: + args["operations_left"] = None return GetRpnStatusResponse(**args) @@ -1655,18 +1767,26 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("paid_at", None) if field is not None: args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["paid_at"] = None return Invoice(**args) @@ -1765,18 +1885,26 @@ def unmarshal_InvoiceSummary(data: Any) -> InvoiceSummary: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: 
args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("paid_at", None) if field is not None: args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["paid_at"] = None return InvoiceSummary(**args) @@ -1852,21 +1980,27 @@ def unmarshal_RpnSanIp(data: Any) -> RpnSanIp: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("server", None) if field is not None: args["server"] = unmarshal_RpnSanIpServer(field) + else: + args["server"] = None field = data.get("rpnv2_group", None) if field is not None: args["rpnv2_group"] = unmarshal_RpnSanIpRpnV2Group(field) + else: + args["rpnv2_group"] = None field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_IP(field) + else: + args["ip"] = None return RpnSanIp(**args) @@ -1955,20 +2089,28 @@ def unmarshal_RefundSummary(data: Any) -> RefundSummary: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("refunded_at", None) if field is not None: args["refunded_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["refunded_at"] = None return RefundSummary(**args) @@ -2106,14 +2248,20 @@ def unmarshal_RpnGroupMember(data: Any) -> RpnGroupMember: field = data.get("san_server", None) if field is not None: args["san_server"] = unmarshal_RpnSanServer(field) + else: + args["san_server"] = None field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None field = data.get("speed", None) if field is not None: args["speed"] = field + else: + args["speed"] = None return RpnGroupMember(**args) @@ -2220,6 +2368,8 @@ def unmarshal_RpnSanSummary(data: Any) -> RpnSanSummary: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("offer_name", None) if field is not None: @@ -2246,16 +2396,22 @@ def unmarshal_RpnSanSummary(data: Any) -> RpnSanSummary: args["delivered_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["delivered_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return RpnSanSummary(**args) @@ -2328,10 +2484,14 @@ def unmarshal_RpnServerCapability(data: Any) -> RpnServerCapability: field = data.get("ip_address", None) if field is not None: args["ip_address"] = field + else: + args["ip_address"] = None field = data.get("rpn_version", None) if field is not None: args["rpn_version"] = field + else: + args["rpn_version"] = None return RpnServerCapability(**args) @@ -2407,14 +2567,20 @@ def 
unmarshal_RpnV2Member(data: Any) -> RpnV2Member: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None field = data.get("rpnv1_group", None) if field is not None: args["rpnv1_group"] = unmarshal_RpnGroup(field) + else: + args["rpnv1_group"] = None field = data.get("speed", None) if field is not None: args["speed"] = field + else: + args["speed"] = None return RpnV2Member(**args) @@ -2442,20 +2608,28 @@ def unmarshal_Log(data: Any) -> Log: field = data.get("group", None) if field is not None: args["group"] = unmarshal_RpnV2Group(field) + else: + args["group"] = None field = data.get("member", None) if field is not None: args["member"] = unmarshal_RpnV2Member(field) + else: + args["member"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("finished_at", None) if field is not None: args["finished_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["finished_at"] = None return Log(**args) @@ -2537,7 +2711,7 @@ def unmarshal_ServerDisk(data: Any) -> ServerDisk: if field is not None: args["connector"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -2592,6 +2766,8 @@ def unmarshal_ServerEvent(data: Any) -> ServerEvent: field = data.get("date", None) if field is not None: args["date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["date"] = None return ServerEvent(**args) @@ -2648,14 +2824,20 @@ def unmarshal_ServerSummary(data: Any) -> ServerSummary: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("offer_id", None) if field is not None: @@ -2692,14 +2874,20 @@ def unmarshal_ServerSummary(data: Any) -> ServerSummary: field = data.get("os_id", None) if field is not None: args["os_id"] = field + else: + args["os_id"] = None field = data.get("level", None) if field is not None: args["level"] = unmarshal_ServiceLevel(field) + else: + args["level"] = None field = data.get("rpn_version", None) if field is not None: args["rpn_version"] = field + else: + args["rpn_version"] = None return ServerSummary(**args) @@ -2834,20 +3022,28 @@ def unmarshal_Refund(data: Any) -> Refund: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("refunded_at", None) if field is not None: args["refunded_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["refunded_at"] = None return 
Refund(**args) @@ -2887,7 +3083,7 @@ def unmarshal_Partition(data: Any) -> Partition: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -2910,6 +3106,8 @@ def unmarshal_Partition(data: Any) -> Partition: field = data.get("mount_point", None) if field is not None: args["mount_point"] = field + else: + args["mount_point"] = None return Partition(**args) @@ -2964,10 +3162,14 @@ def unmarshal_ServerInstall(data: Any) -> ServerInstall: field = data.get("user_login", None) if field is not None: args["user_login"] = field + else: + args["user_login"] = None field = data.get("panel_url", None) if field is not None: args["panel_url"] = field + else: + args["panel_url"] = None return ServerInstall(**args) diff --git a/scaleway-async/scaleway_async/document_db/v1beta1/marshalling.py b/scaleway-async/scaleway_async/document_db/v1beta1/marshalling.py index 8efeb6af8..b4b876ca3 100644 --- a/scaleway-async/scaleway_async/document_db/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/document_db/v1beta1/marshalling.py @@ -155,26 +155,38 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("ip", None) if field is not None: args["ip"] = field + else: + args["ip"] = None field = data.get("name", None) if field is not None: args["name"] = field + else: + args["name"] = None field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None field = data.get("load_balancer", None) if field is not None: args["load_balancer"] = unmarshal_EndpointLoadBalancerDetails(field) + else: + args["load_balancer"] = None field = data.get("direct_access", None) if field is not None: args["direct_access"] = unmarshal_EndpointDirectAccessDetails(field) + else: + args["direct_access"] = None field = data.get("hostname", None) if field is not None: args["hostname"] = field + else: + args["hostname"] = None return Endpoint(**args) @@ -198,18 +210,26 @@ def unmarshal_Maintenance(data: Any) -> Maintenance: field = data.get("starts_at", None) if field is not None: args["starts_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["starts_at"] = None field = data.get("stops_at", None) if field is not None: args["stops_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stops_at"] = None field = data.get("closed_at", None) if field is not None: args["closed_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["closed_at"] = None field = data.get("forced_at", None) if field is not None: args["forced_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["forced_at"] = None return Maintenance(**args) @@ -301,14 +321,20 @@ def unmarshal_InstanceLog(data: Any) -> InstanceLog: field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return InstanceLog(**args) @@ -338,6 +364,8 @@ def unmarshal_BackupSchedule(data: Any) -> BackupSchedule: args["next_run_at"] = ( parser.isoparse(field) if 
isinstance(field, str) else field ) + else: + args["next_run_at"] = None return BackupSchedule(**args) @@ -372,10 +400,14 @@ def unmarshal_LogsPolicy(data: Any) -> LogsPolicy: field = data.get("max_age_retention", None) if field is not None: args["max_age_retention"] = field + else: + args["max_age_retention"] = None field = data.get("total_disk_retention", None) if field is not None: args["total_disk_retention"] = field + else: + args["total_disk_retention"] = None return LogsPolicy(**args) @@ -415,7 +447,7 @@ def unmarshal_Volume(data: Any) -> Volume: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -423,7 +455,7 @@ def unmarshal_Volume(data: Any) -> Volume: if field is not None: args["size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -457,10 +489,14 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None field = data.get("project_id", None) if field is not None: @@ -499,10 +535,14 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("endpoint", None) if field is not None: args["endpoint"] = unmarshal_Endpoint(field) + else: + args["endpoint"] = None field = data.get("backup_schedule", None) if field is not None: args["backup_schedule"] = unmarshal_BackupSchedule(field) + else: + args["backup_schedule"] = None field = data.get("read_replicas", None) if field is not None: @@ -539,6 +579,8 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("logs_policy", None) if field is not None: args["logs_policy"] = unmarshal_LogsPolicy(field) + else: + args["logs_policy"] = None return Instance(**args) @@ -574,11 +616,11 @@ def unmarshal_SnapshotVolumeType(data: Any) -> SnapshotVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -624,22 +666,32 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("volume_type", None) if field is not None: args["volume_type"] = unmarshal_SnapshotVolumeType(field) + else: + args["volume_type"] = None return Snapshot(**args) @@ -694,6 +746,8 @@ def unmarshal_ACLRule(data: Any) -> ACLRule: field = data.get("port", None) if field is not None: args["port"] = field + else: + args["port"] = None return ACLRule(**args) @@ -818,26 +872,38 @@ def unmarshal_EngineSetting(data: Any) -> EngineSetting: field = data.get("unit", None) if field is not 
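# Illustrative sketch of the timestamp handling repeated in these hunks: string
# values are parsed with dateutil's parser.isoparse, already-parsed values pass
# through unchanged, and a missing key now yields an explicit None.
# read_timestamp is a helper invented for this sketch; the generated code
# inlines the same expression per field.
from datetime import datetime
from typing import Any, Dict, Optional

from dateutil import parser


def read_timestamp(data: Dict[str, Any], key: str) -> Optional[datetime]:
    field = data.get(key, None)
    if field is not None:
        return parser.isoparse(field) if isinstance(field, str) else field
    return None


# Hypothetical usage:
# read_timestamp({"starts_at": "2024-04-02T15:51:33+02:00"}, "starts_at")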
None: args["unit"] = field + else: + args["unit"] = None field = data.get("string_constraint", None) if field is not None: args["string_constraint"] = field + else: + args["string_constraint"] = None field = data.get("int_min", None) if field is not None: args["int_min"] = field + else: + args["int_min"] = None field = data.get("int_max", None) if field is not None: args["int_max"] = field + else: + args["int_max"] = None field = data.get("float_min", None) if field is not None: args["float_min"] = field + else: + args["float_min"] = None field = data.get("float_max", None) if field is not None: args["float_max"] = field + else: + args["float_max"] = None return EngineSetting(**args) @@ -883,6 +949,8 @@ def unmarshal_EngineVersion(data: Any) -> EngineVersion: args["end_of_life"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["end_of_life"] = None return EngineVersion(**args) @@ -1089,7 +1157,7 @@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1109,7 +1177,7 @@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: if field is not None: args["chunk_size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -1155,10 +1223,14 @@ def unmarshal_NodeType(data: Any) -> NodeType: field = data.get("volume_constraint", None) if field is not None: args["volume_constraint"] = unmarshal_NodeTypeVolumeConstraintSizes(field) + else: + args["volume_constraint"] = None field = data.get("is_bssd_compatible", None) if field is not None: args["is_bssd_compatible"] = field + else: + args["is_bssd_compatible"] = None field = data.get("available_volume_types", None) if field is not None: diff --git a/scaleway-async/scaleway_async/domain/v2beta1/marshalling.py b/scaleway-async/scaleway_async/domain/v2beta1/marshalling.py index c198aad52..fdeab0c31 100644 --- a/scaleway-async/scaleway_async/domain/v2beta1/marshalling.py +++ b/scaleway-async/scaleway_async/domain/v2beta1/marshalling.py @@ -137,6 +137,8 @@ def unmarshal_ContactExtensionFRAssociationInfo( args["publication_jo"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["publication_jo"] = None return ContactExtensionFRAssociationInfo(**args) @@ -241,24 +243,34 @@ def unmarshal_ContactExtensionFR(data: Any) -> ContactExtensionFR: field = data.get("individual_info", None) if field is not None: args["individual_info"] = unmarshal_ContactExtensionFRIndividualInfo(field) + else: + args["individual_info"] = None field = data.get("duns_info", None) if field is not None: args["duns_info"] = unmarshal_ContactExtensionFRDunsInfo(field) + else: + args["duns_info"] = None field = data.get("association_info", None) if field is not None: args["association_info"] = unmarshal_ContactExtensionFRAssociationInfo(field) + else: + args["association_info"] = None field = data.get("trademark_info", None) if field is not None: args["trademark_info"] = unmarshal_ContactExtensionFRTrademarkInfo(field) + else: + args["trademark_info"] = None field = data.get("code_auth_afnic_info", None) if field is not None: args["code_auth_afnic_info"] = unmarshal_ContactExtensionFRCodeAuthAfnicInfo( field ) + else: + args["code_auth_afnic_info"] = None return ContactExtensionFR(**args) @@ -390,14 +402,20 @@ def unmarshal_Contact(data: Any) -> Contact: args["questions"] = ( 
[unmarshal_ContactQuestion(v) for v in field] if field is not None else None ) + else: + args["questions"] = None field = data.get("extension_fr", None) if field is not None: args["extension_fr"] = unmarshal_ContactExtensionFR(field) + else: + args["extension_fr"] = None field = data.get("extension_eu", None) if field is not None: args["extension_eu"] = unmarshal_ContactExtensionEU(field) + else: + args["extension_eu"] = None field = data.get("email_status", None) if field is not None: @@ -414,6 +432,8 @@ def unmarshal_Contact(data: Any) -> Contact: field = data.get("extension_nl", None) if field is not None: args["extension_nl"] = unmarshal_ContactExtensionNL(field) + else: + args["extension_nl"] = None return Contact(**args) @@ -463,10 +483,14 @@ def unmarshal_DNSZone(data: Any) -> DNSZone: field = data.get("message", None) if field is not None: args["message"] = field + else: + args["message"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return DNSZone(**args) @@ -529,10 +553,14 @@ def unmarshal_SSLCertificate(data: Any) -> SSLCertificate: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None return SSLCertificate(**args) @@ -554,6 +582,8 @@ def unmarshal_CheckContactsCompatibilityResponseContactCheckResult( field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None return CheckContactsCompatibilityResponseContactCheckResult(**args) @@ -577,18 +607,24 @@ def unmarshal_CheckContactsCompatibilityResponse( args["owner_check_result"] = ( unmarshal_CheckContactsCompatibilityResponseContactCheckResult(field) ) + else: + args["owner_check_result"] = None field = data.get("administrative_check_result", None) if field is not None: args["administrative_check_result"] = ( unmarshal_CheckContactsCompatibilityResponseContactCheckResult(field) ) + else: + args["administrative_check_result"] = None field = data.get("technical_check_result", None) if field is not None: args["technical_check_result"] = ( unmarshal_CheckContactsCompatibilityResponseContactCheckResult(field) ) + else: + args["technical_check_result"] = None return CheckContactsCompatibilityResponse(**args) @@ -660,7 +696,7 @@ def unmarshal_DSRecordDigest(data: Any) -> DSRecordDigest: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -671,6 +707,8 @@ def unmarshal_DSRecordDigest(data: Any) -> DSRecordDigest: field = data.get("public_key", None) if field is not None: args["public_key"] = unmarshal_DSRecordPublicKey(field) + else: + args["public_key"] = None return DSRecordDigest(**args) @@ -694,10 +732,14 @@ def unmarshal_DSRecord(data: Any) -> DSRecord: field = data.get("digest", None) if field is not None: args["digest"] = unmarshal_DSRecordDigest(field) + else: + args["digest"] = None field = data.get("public_key", None) if field is not None: args["public_key"] = unmarshal_DSRecordPublicKey(field) + else: + args["public_key"] = None return DSRecord(**args) @@ -721,6 +763,8 @@ def unmarshal_TldOffer(data: Any) -> TldOffer: field = data.get("price", 
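# Illustrative sketch of how repeated (list) fields such as "questions" are
# decoded in these hunks: each element is passed through the matching
# unmarshal_* helper, and an absent key again becomes an explicit None.
# unmarshal_contact_question_sketch stands in for the generated
# unmarshal_ContactQuestion and just echoes the payload here.
from typing import Any, Dict, List, Optional


def unmarshal_contact_question_sketch(data: Any) -> Dict[str, Any]:
    # Placeholder for the generated helper.
    return dict(data)


def read_questions(data: Dict[str, Any]) -> Optional[List[Dict[str, Any]]]:
    field = data.get("questions", None)
    if field is not None:
        return [unmarshal_contact_question_sketch(v) for v in field]
    return None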
None) if field is not None: args["price"] = unmarshal_Money(field) + else: + args["price"] = None return TldOffer(**args) @@ -870,14 +914,20 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("dnssec", None) if field is not None: args["dnssec"] = unmarshal_DomainDNSSEC(field) + else: + args["dnssec"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("status", None) if field is not None: @@ -902,30 +952,42 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("owner_contact", None) if field is not None: args["owner_contact"] = unmarshal_Contact(field) + else: + args["owner_contact"] = None field = data.get("technical_contact", None) if field is not None: args["technical_contact"] = unmarshal_Contact(field) + else: + args["technical_contact"] = None field = data.get("administrative_contact", None) if field is not None: args["administrative_contact"] = unmarshal_Contact(field) + else: + args["administrative_contact"] = None field = data.get("external_domain_registration_status", None) if field is not None: args["external_domain_registration_status"] = ( unmarshal_DomainRegistrationStatusExternalDomain(field) ) + else: + args["external_domain_registration_status"] = None field = data.get("transfer_registration_status", None) if field is not None: args["transfer_registration_status"] = ( unmarshal_DomainRegistrationStatusTransfer(field) ) + else: + args["transfer_registration_status"] = None field = data.get("tld", None) if field is not None: args["tld"] = unmarshal_Tld(field) + else: + args["tld"] = None return Domain(**args) @@ -1062,10 +1124,14 @@ def unmarshal_RecordHTTPServiceConfig(data: Any) -> RecordHTTPServiceConfig: field = data.get("must_contain", None) if field is not None: args["must_contain"] = field + else: + args["must_contain"] = None field = data.get("user_agent", None) if field is not None: args["user_agent"] = field + else: + args["user_agent"] = None return RecordHTTPServiceConfig(**args) @@ -1132,7 +1198,7 @@ def unmarshal_Record(data: Any) -> Record: if field is not None: args["ttl"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1143,22 +1209,32 @@ def unmarshal_Record(data: Any) -> Record: field = data.get("comment", None) if field is not None: args["comment"] = field + else: + args["comment"] = None field = data.get("geo_ip_config", None) if field is not None: args["geo_ip_config"] = unmarshal_RecordGeoIPConfig(field) + else: + args["geo_ip_config"] = None field = data.get("http_service_config", None) if field is not None: args["http_service_config"] = unmarshal_RecordHTTPServiceConfig(field) + else: + args["http_service_config"] = None field = data.get("weighted_config", None) if field is not None: args["weighted_config"] = unmarshal_RecordWeightedConfig(field) + else: + args["weighted_config"] = None field = data.get("view_config", None) if field is not None: args["view_config"] = unmarshal_RecordViewConfig(field) + else: + args["view_config"] = None return Record(**args) @@ -1175,17 +1251,21 @@ def unmarshal_RecordIdentifier(data: Any) -> RecordIdentifier: if field is not None: args["name"] = field - field = data.get("type_", None) + field = 
data.get("type", None) if field is not None: args["type_"] = field field = data.get("data", None) if field is not None: args["data"] = field + else: + args["data"] = None field = data.get("ttl", None) if field is not None: args["ttl"] = field + else: + args["ttl"] = None return RecordIdentifier(**args) @@ -1229,10 +1309,14 @@ def unmarshal_RecordChangeDelete(data: Any) -> RecordChangeDelete: field = data.get("id", None) if field is not None: args["id"] = field + else: + args["id"] = None field = data.get("id_fields", None) if field is not None: args["id_fields"] = unmarshal_RecordIdentifier(field) + else: + args["id_fields"] = None return RecordChangeDelete(**args) @@ -1254,10 +1338,14 @@ def unmarshal_RecordChangeSet(data: Any) -> RecordChangeSet: field = data.get("id", None) if field is not None: args["id"] = field + else: + args["id"] = None field = data.get("id_fields", None) if field is not None: args["id_fields"] = unmarshal_RecordIdentifier(field) + else: + args["id_fields"] = None return RecordChangeSet(**args) @@ -1273,18 +1361,26 @@ def unmarshal_RecordChange(data: Any) -> RecordChange: field = data.get("add", None) if field is not None: args["add"] = unmarshal_RecordChangeAdd(field) + else: + args["add"] = None - field = data.get("set_", None) + field = data.get("set", None) if field is not None: args["set_"] = unmarshal_RecordChangeSet(field) + else: + args["set_"] = None field = data.get("delete", None) if field is not None: args["delete"] = unmarshal_RecordChangeDelete(field) + else: + args["delete"] = None field = data.get("clear", None) if field is not None: args["clear"] = unmarshal_RecordChangeClear(field) + else: + args["clear"] = None return RecordChange(**args) @@ -1397,6 +1493,8 @@ def unmarshal_ContactRoles(data: Any) -> ContactRoles: field = data.get("contact", None) if field is not None: args["contact"] = unmarshal_Contact(field) + else: + args["contact"] = None return ContactRoles(**args) @@ -1519,6 +1617,8 @@ def unmarshal_DNSZoneVersion(data: Any) -> DNSZoneVersion: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return DNSZoneVersion(**args) @@ -1625,10 +1725,14 @@ def unmarshal_DomainSummary(data: Any) -> DomainSummary: field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("status", None) if field is not None: @@ -1647,16 +1751,22 @@ def unmarshal_DomainSummary(data: Any) -> DomainSummary: args["external_domain_registration_status"] = ( unmarshal_DomainRegistrationStatusExternalDomain(field) ) + else: + args["external_domain_registration_status"] = None field = data.get("transfer_registration_status", None) if field is not None: args["transfer_registration_status"] = ( unmarshal_DomainRegistrationStatusTransfer(field) ) + else: + args["transfer_registration_status"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return DomainSummary(**args) @@ -1709,32 +1819,44 @@ def unmarshal_RenewableDomain(data: Any) -> RenewableDomain: field = data.get("renewable_duration_in_years", None) if field is not 
None: args["renewable_duration_in_years"] = field + else: + args["renewable_duration_in_years"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("limit_renew_at", None) if field is not None: args["limit_renew_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["limit_renew_at"] = None field = data.get("limit_redemption_at", None) if field is not None: args["limit_redemption_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["limit_redemption_at"] = None field = data.get("estimated_delete_at", None) if field is not None: args["estimated_delete_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["estimated_delete_at"] = None field = data.get("tld", None) if field is not None: args["tld"] = unmarshal_Tld(field) + else: + args["tld"] = None return RenewableDomain(**args) @@ -1801,7 +1923,7 @@ def unmarshal_Task(data: Any) -> Task: if field is not None: args["organization_id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1812,22 +1934,32 @@ def unmarshal_Task(data: Any) -> Task: field = data.get("domain", None) if field is not None: args["domain"] = field + else: + args["domain"] = None field = data.get("started_at", None) if field is not None: args["started_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["started_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("message", None) if field is not None: args["message"] = field + else: + args["message"] = None field = data.get("contact_identifier", None) if field is not None: args["contact_identifier"] = field + else: + args["contact_identifier"] = None return Task(**args) @@ -1899,6 +2031,8 @@ def unmarshal_OrderResponse(data: Any) -> OrderResponse: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return OrderResponse(**args) @@ -1949,6 +2083,8 @@ def unmarshal_RegisterExternalDomainResponse( field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return RegisterExternalDomainResponse(**args) @@ -1983,6 +2119,8 @@ def unmarshal_AvailableDomain(data: Any) -> AvailableDomain: field = data.get("tld", None) if field is not None: args["tld"] = unmarshal_Tld(field) + else: + args["tld"] = None return AvailableDomain(**args) diff --git a/scaleway-async/scaleway_async/flexibleip/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/flexibleip/v1alpha1/marshalling.py index 5f44ed7d6..6f754d003 100644 --- a/scaleway-async/scaleway_async/flexibleip/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/flexibleip/v1alpha1/marshalling.py @@ -52,10 +52,14 @@ def unmarshal_MACAddress(data: Any) -> MACAddress: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if 
isinstance(field, str) else field + else: + args["created_at"] = None return MACAddress(**args) @@ -99,6 +103,8 @@ def unmarshal_FlexibleIP(data: Any) -> FlexibleIP: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("reverse", None) if field is not None: @@ -111,14 +117,20 @@ def unmarshal_FlexibleIP(data: Any) -> FlexibleIP: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("mac_address", None) if field is not None: args["mac_address"] = unmarshal_MACAddress(field) + else: + args["mac_address"] = None field = data.get("server_id", None) if field is not None: args["server_id"] = field + else: + args["server_id"] = None return FlexibleIP(**args) diff --git a/scaleway-async/scaleway_async/function/v1beta1/marshalling.py b/scaleway-async/scaleway_async/function/v1beta1/marshalling.py index 34ff2d4d7..23c4b6ef4 100644 --- a/scaleway-async/scaleway_async/function/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/function/v1beta1/marshalling.py @@ -79,6 +79,8 @@ def unmarshal_Cron(data: Any) -> Cron: field = data.get("args", None) if field is not None: args["args"] = field + else: + args["args"] = None return Cron(**args) @@ -114,6 +116,8 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None return Domain(**args) @@ -220,18 +224,26 @@ def unmarshal_Function(data: Any) -> Function: field = data.get("timeout", None) if field is not None: args["timeout"] = field + else: + args["timeout"] = None field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("build_message", None) if field is not None: args["build_message"] = field + else: + args["build_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return Function(**args) @@ -291,10 +303,14 @@ def unmarshal_Namespace(data: Any) -> Namespace: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return Namespace(**args) @@ -322,22 +338,32 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("function_id", None) if field is not None: args["function_id"] = field + else: + args["function_id"] = None field = data.get("namespace_id", None) if field is not None: args["namespace_id"] = field + else: + args["namespace_id"] = None field = data.get("public_key", None) if field is not None: args["public_key"] = field + else: + args["public_key"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return Token(**args) @@ -369,6 +395,8 @@ def unmarshal_TriggerMnqNatsClientConfig(data: Any) -> TriggerMnqNatsClientConfi field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + 
else: + args["mnq_credential_id"] = None return TriggerMnqNatsClientConfig(**args) @@ -396,6 +424,8 @@ def unmarshal_TriggerMnqSqsClientConfig(data: Any) -> TriggerMnqSqsClientConfig: field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + else: + args["mnq_credential_id"] = None return TriggerMnqSqsClientConfig(**args) @@ -462,18 +492,26 @@ def unmarshal_Trigger(data: Any) -> Trigger: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("scw_sqs_config", None) if field is not None: args["scw_sqs_config"] = unmarshal_TriggerMnqSqsClientConfig(field) + else: + args["scw_sqs_config"] = None field = data.get("scw_nats_config", None) if field is not None: args["scw_nats_config"] = unmarshal_TriggerMnqNatsClientConfig(field) + else: + args["scw_nats_config"] = None field = data.get("sqs_config", None) if field is not None: args["sqs_config"] = unmarshal_TriggerSqsClientConfig(field) + else: + args["sqs_config"] = None return Trigger(**args) diff --git a/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py index 9895c9550..5f4410d94 100644 --- a/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/iam/v1alpha1/marshalling.py @@ -87,14 +87,20 @@ def unmarshal_JWT(data: Any) -> JWT: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return JWT(**args) @@ -130,26 +136,38 @@ def unmarshal_APIKey(data: Any) -> APIKey: field = data.get("secret_key", None) if field is not None: args["secret_key"] = field + else: + args["secret_key"] = None field = data.get("application_id", None) if field is not None: args["application_id"] = field + else: + args["application_id"] = None field = data.get("user_id", None) if field is not None: args["user_id"] = field + else: + args["user_id"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return APIKey(**args) @@ -193,10 +211,14 @@ def unmarshal_Application(data: Any) -> Application: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Application(**args) @@ -240,10 +262,14 @@ def unmarshal_Group(data: Any) -> Group: field = data.get("created_at", None) if field is not None: 
args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Group(**args) @@ -291,6 +317,8 @@ def unmarshal_Log(data: Any) -> Log: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Log(**args) @@ -322,10 +350,14 @@ def unmarshal_Policy(data: Any) -> Policy: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("editable", None) if field is not None: @@ -350,18 +382,26 @@ def unmarshal_Policy(data: Any) -> Policy: field = data.get("user_id", None) if field is not None: args["user_id"] = field + else: + args["user_id"] = None field = data.get("group_id", None) if field is not None: args["group_id"] = field + else: + args["group_id"] = None field = data.get("application_id", None) if field is not None: args["application_id"] = field + else: + args["application_id"] = None field = data.get("no_principal", None) if field is not None: args["no_principal"] = field + else: + args["no_principal"] = None return Policy(**args) @@ -393,10 +433,14 @@ def unmarshal_Quotum(data: Any) -> Quotum: field = data.get("limit", None) if field is not None: args["limit"] = field + else: + args["limit"] = None field = data.get("unlimited", None) if field is not None: args["unlimited"] = field + else: + args["unlimited"] = None return Quotum(**args) @@ -440,10 +484,14 @@ def unmarshal_SSHKey(data: Any) -> SSHKey: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return SSHKey(**args) @@ -472,7 +520,7 @@ def unmarshal_User(data: Any) -> User: if field is not None: args["deletable"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -483,6 +531,8 @@ def unmarshal_User(data: Any) -> User: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("mfa", None) if field is not None: @@ -499,16 +549,22 @@ def unmarshal_User(data: Any) -> User: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("last_login_at", None) if field is not None: args["last_login_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_login_at"] = None field = data.get("two_factor_enabled", None) if field is not None: args["two_factor_enabled"] = field + else: + args["two_factor_enabled"] = None return User(**args) @@ -532,6 +588,8 @@ def unmarshal_EncodedJWT(data: Any) -> EncodedJWT: field = 
data.get("jwt", None) if field is not None: args["jwt"] = unmarshal_JWT(field) + else: + args["jwt"] = None return EncodedJWT(**args) @@ -664,6 +722,8 @@ def unmarshal_PermissionSet(data: Any) -> PermissionSet: field = data.get("categories", None) if field is not None: args["categories"] = field + else: + args["categories"] = None return PermissionSet(**args) @@ -750,18 +810,26 @@ def unmarshal_Rule(data: Any) -> Rule: field = data.get("permission_set_names", None) if field is not None: args["permission_set_names"] = field + else: + args["permission_set_names"] = None field = data.get("project_ids", None) if field is not None: args["project_ids"] = field + else: + args["project_ids"] = None field = data.get("organization_id", None) if field is not None: args["organization_id"] = field + else: + args["organization_id"] = None field = data.get("account_root_user_id", None) if field is not None: args["account_root_user_id"] = field + else: + args["account_root_user_id"] = None return Rule(**args) diff --git a/scaleway-async/scaleway_async/instance/v1/marshalling.py b/scaleway-async/scaleway_async/instance/v1/marshalling.py index e5fc59e05..24987fab2 100644 --- a/scaleway-async/scaleway_async/instance/v1/marshalling.py +++ b/scaleway-async/scaleway_async/instance/v1/marshalling.py @@ -294,18 +294,24 @@ def unmarshal_Volume(data: Any) -> Volume: field = data.get("export_uri", None) if field is not None: args["export_uri"] = field + else: + args["export_uri"] = None field = data.get("creation_date", None) if field is not None: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("tags", None) if field is not None: @@ -322,6 +328,8 @@ def unmarshal_Volume(data: Any) -> Volume: field = data.get("server", None) if field is not None: args["server"] = unmarshal_ServerSummary(field) + else: + args["server"] = None return Volume(**args) @@ -394,16 +402,22 @@ def unmarshal_Image(data: Any) -> Image: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("default_bootscript", None) if field is not None: args["default_bootscript"] = unmarshal_Bootscript(field) + else: + args["default_bootscript"] = None field = data.get("public", None) if field is not None: @@ -428,6 +442,8 @@ def unmarshal_Image(data: Any) -> Image: field = data.get("root_volume", None) if field is not None: args["root_volume"] = unmarshal_VolumeSummary(field) + else: + args["root_volume"] = None return Image(**args) @@ -618,6 +634,8 @@ def unmarshal_ServerMaintenance(data: Any) -> ServerMaintenance: field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None return ServerMaintenance(**args) @@ -653,6 +671,8 @@ def unmarshal_VolumeServer(data: Any) -> VolumeServer: field = data.get("server", None) if field is not None: args["server"] = unmarshal_ServerSummary(field) + else: + args["server"] = None field = data.get("volume_type", None) if field is not 
None: @@ -679,12 +699,16 @@ def unmarshal_VolumeServer(data: Any) -> VolumeServer: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None return VolumeServer(**args) @@ -732,6 +756,8 @@ def unmarshal_Server(data: Any) -> Server: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("dynamic_ip_required", None) if field is not None: @@ -752,18 +778,26 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("enable_ipv6", None) if field is not None: args["enable_ipv6"] = field + else: + args["enable_ipv6"] = None field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None field = data.get("private_ip", None) if field is not None: args["private_ip"] = field + else: + args["private_ip"] = None field = data.get("public_ip", None) if field is not None: args["public_ip"] = unmarshal_ServerIp(field) + else: + args["public_ip"] = None field = data.get("public_ips", None) if field is not None: @@ -796,22 +830,32 @@ def unmarshal_Server(data: Any) -> Server: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("location", None) if field is not None: args["location"] = unmarshal_ServerLocation(field) + else: + args["location"] = None field = data.get("ipv6", None) if field is not None: args["ipv6"] = unmarshal_ServerIpv6(field) + else: + args["ipv6"] = None field = data.get("bootscript", None) if field is not None: args["bootscript"] = unmarshal_Bootscript(field) + else: + args["bootscript"] = None field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroupSummary(field) + else: + args["security_group"] = None field = data.get("maintenances", None) if field is not None: @@ -842,6 +886,8 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return Server(**args) @@ -857,6 +903,8 @@ def unmarshal_AttachServerVolumeResponse(data: Any) -> AttachServerVolumeRespons field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return AttachServerVolumeResponse(**args) @@ -872,6 +920,8 @@ def unmarshal_CreateImageResponse(data: Any) -> CreateImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return CreateImageResponse(**args) @@ -904,7 +954,7 @@ def unmarshal_Ip(data: Any) -> Ip: if field is not None: args["project"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -927,10 +977,14 @@ def unmarshal_Ip(data: Any) -> Ip: field = data.get("reverse", None) if field is not None: args["reverse"] = field + else: + args["reverse"] = None field = data.get("server", None) if field is not None: args["server"] = unmarshal_ServerSummary(field) + else: + args["server"] = None return Ip(**args) @@ -946,6 +1000,8 @@ def unmarshal_CreateIpResponse(data: Any) -> 
CreateIpResponse: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_Ip(field) + else: + args["ip"] = None return CreateIpResponse(**args) @@ -961,6 +1017,8 @@ def unmarshal_CreatePlacementGroupResponse(data: Any) -> CreatePlacementGroupRes field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return CreatePlacementGroupResponse(**args) @@ -976,6 +1034,8 @@ def unmarshal_CreatePrivateNICResponse(data: Any) -> CreatePrivateNICResponse: field = data.get("private_nic", None) if field is not None: args["private_nic"] = unmarshal_PrivateNIC(field) + else: + args["private_nic"] = None return CreatePrivateNICResponse(**args) @@ -1049,18 +1109,24 @@ def unmarshal_SecurityGroup(data: Any) -> SecurityGroup: field = data.get("organization_default", None) if field is not None: args["organization_default"] = field + else: + args["organization_default"] = None field = data.get("creation_date", None) if field is not None: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None return SecurityGroup(**args) @@ -1076,6 +1142,8 @@ def unmarshal_CreateSecurityGroupResponse(data: Any) -> CreateSecurityGroupRespo field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return CreateSecurityGroupResponse(**args) @@ -1123,10 +1191,14 @@ def unmarshal_SecurityGroupRule(data: Any) -> SecurityGroupRule: field = data.get("dest_port_from", None) if field is not None: args["dest_port_from"] = field + else: + args["dest_port_from"] = None field = data.get("dest_port_to", None) if field is not None: args["dest_port_to"] = field + else: + args["dest_port_to"] = None return SecurityGroupRule(**args) @@ -1144,6 +1216,8 @@ def unmarshal_CreateSecurityGroupRuleResponse( field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) + else: + args["rule"] = None return CreateSecurityGroupRuleResponse(**args) @@ -1159,6 +1233,8 @@ def unmarshal_CreateServerResponse(data: Any) -> CreateServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return CreateServerResponse(**args) @@ -1229,22 +1305,30 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: field = data.get("base_volume", None) if field is not None: args["base_volume"] = unmarshal_SnapshotBaseVolume(field) + else: + args["base_volume"] = None field = data.get("creation_date", None) if field is not None: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("error_reason", None) if field is not None: args["error_reason"] = field + else: + args["error_reason"] = None return Snapshot(**args) @@ -1288,12 +1372,16 @@ def unmarshal_Task(data: Any) -> Task: field = data.get("started_at", None) if field is not None: args["started_at"] = parser.isoparse(field) if 
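# Illustrative sketch of the thin response wrappers in the Instance hunks: each
# *Response message reuses the unmarshal helper of the resource it wraps, with
# the new explicit None default when the key is absent. IpSketch,
# CreateIpResponseSketch and unmarshal_ip_sketch are simplified placeholders
# for the generated Ip / CreateIpResponse / unmarshal_Ip.
from dataclasses import dataclass
from typing import Any, Dict, Optional


@dataclass
class IpSketch:
    address: Optional[str] = None


@dataclass
class CreateIpResponseSketch:
    ip: Optional[IpSketch] = None


def unmarshal_ip_sketch(data: Any) -> IpSketch:
    return IpSketch(address=data.get("address", None))


def unmarshal_create_ip_response_sketch(data: Any) -> CreateIpResponseSketch:
    args: Dict[str, Any] = {}

    field = data.get("ip", None)
    if field is not None:
        args["ip"] = unmarshal_ip_sketch(field)
    else:
        args["ip"] = None

    return CreateIpResponseSketch(**args)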
isinstance(field, str) else field + else: + args["started_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None return Task(**args) @@ -1309,10 +1397,14 @@ def unmarshal_CreateSnapshotResponse(data: Any) -> CreateSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + args["snapshot"] = None field = data.get("task", None) if field is not None: args["task"] = unmarshal_Task(field) + else: + args["task"] = None return CreateSnapshotResponse(**args) @@ -1328,6 +1420,8 @@ def unmarshal_CreateVolumeResponse(data: Any) -> CreateVolumeResponse: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return CreateVolumeResponse(**args) @@ -1343,6 +1437,8 @@ def unmarshal_DetachServerVolumeResponse(data: Any) -> DetachServerVolumeRespons field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return DetachServerVolumeResponse(**args) @@ -1358,6 +1454,8 @@ def unmarshal_ExportSnapshotResponse(data: Any) -> ExportSnapshotResponse: field = data.get("task", None) if field is not None: args["task"] = unmarshal_Task(field) + else: + args["task"] = None return ExportSnapshotResponse(**args) @@ -1373,6 +1471,8 @@ def unmarshal_GetBootscriptResponse(data: Any) -> GetBootscriptResponse: field = data.get("bootscript", None) if field is not None: args["bootscript"] = unmarshal_Bootscript(field) + else: + args["bootscript"] = None return GetBootscriptResponse(**args) @@ -1459,6 +1559,8 @@ def unmarshal_GetDashboardResponse(data: Any) -> GetDashboardResponse: field = data.get("dashboard", None) if field is not None: args["dashboard"] = unmarshal_Dashboard(field) + else: + args["dashboard"] = None return GetDashboardResponse(**args) @@ -1474,6 +1576,8 @@ def unmarshal_GetImageResponse(data: Any) -> GetImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return GetImageResponse(**args) @@ -1489,6 +1593,8 @@ def unmarshal_GetIpResponse(data: Any) -> GetIpResponse: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_Ip(field) + else: + args["ip"] = None return GetIpResponse(**args) @@ -1504,6 +1610,8 @@ def unmarshal_GetPlacementGroupResponse(data: Any) -> GetPlacementGroupResponse: field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return GetPlacementGroupResponse(**args) @@ -1563,6 +1671,8 @@ def unmarshal_GetPrivateNICResponse(data: Any) -> GetPrivateNICResponse: field = data.get("private_nic", None) if field is not None: args["private_nic"] = unmarshal_PrivateNIC(field) + else: + args["private_nic"] = None return GetPrivateNICResponse(**args) @@ -1578,6 +1688,8 @@ def unmarshal_GetSecurityGroupResponse(data: Any) -> GetSecurityGroupResponse: field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return GetSecurityGroupResponse(**args) @@ -1593,6 +1705,8 @@ def unmarshal_GetSecurityGroupRuleResponse(data: Any) -> GetSecurityGroupRuleRes field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) 
+ else: + args["rule"] = None return GetSecurityGroupRuleResponse(**args) @@ -1608,6 +1722,8 @@ def unmarshal_GetServerResponse(data: Any) -> GetServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return GetServerResponse(**args) @@ -1668,6 +1784,8 @@ def unmarshal_GetSnapshotResponse(data: Any) -> GetSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + args["snapshot"] = None return GetSnapshotResponse(**args) @@ -1683,6 +1801,8 @@ def unmarshal_GetVolumeResponse(data: Any) -> GetVolumeResponse: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return GetVolumeResponse(**args) @@ -1900,10 +2020,14 @@ def unmarshal_ServerTypeNetworkInterface(data: Any) -> ServerTypeNetworkInterfac field = data.get("internal_bandwidth", None) if field is not None: args["internal_bandwidth"] = field + else: + args["internal_bandwidth"] = None field = data.get("internet_bandwidth", None) if field is not None: args["internet_bandwidth"] = field + else: + args["internet_bandwidth"] = None return ServerTypeNetworkInterface(**args) @@ -1944,6 +2068,8 @@ def unmarshal_ServerTypeCapabilities(data: Any) -> ServerTypeCapabilities: field = data.get("block_storage", None) if field is not None: args["block_storage"] = field + else: + args["block_storage"] = None return ServerTypeCapabilities(**args) @@ -1971,10 +2097,14 @@ def unmarshal_ServerTypeNetwork(data: Any) -> ServerTypeNetwork: field = data.get("sum_internal_bandwidth", None) if field is not None: args["sum_internal_bandwidth"] = field + else: + args["sum_internal_bandwidth"] = None field = data.get("sum_internet_bandwidth", None) if field is not None: args["sum_internet_bandwidth"] = field + else: + args["sum_internet_bandwidth"] = None return ServerTypeNetwork(**args) @@ -1992,6 +2122,8 @@ def unmarshal_ServerTypeVolumeConstraintsByType( field = data.get("l_ssd", None) if field is not None: args["l_ssd"] = unmarshal_ServerTypeVolumeConstraintSizes(field) + else: + args["l_ssd"] = None return ServerTypeVolumeConstraintsByType(**args) @@ -2007,6 +2139,8 @@ def unmarshal_ServerType(data: Any) -> ServerType: field = data.get("monthly_price", None) if field is not None: args["monthly_price"] = field + else: + args["monthly_price"] = None field = data.get("hourly_price", None) if field is not None: @@ -2037,26 +2171,38 @@ def unmarshal_ServerType(data: Any) -> ServerType: args["per_volume_constraint"] = unmarshal_ServerTypeVolumeConstraintsByType( field ) + else: + args["per_volume_constraint"] = None field = data.get("volumes_constraint", None) if field is not None: args["volumes_constraint"] = unmarshal_ServerTypeVolumeConstraintSizes(field) + else: + args["volumes_constraint"] = None field = data.get("gpu", None) if field is not None: args["gpu"] = field + else: + args["gpu"] = None field = data.get("network", None) if field is not None: args["network"] = unmarshal_ServerTypeNetwork(field) + else: + args["network"] = None field = data.get("capabilities", None) if field is not None: args["capabilities"] = unmarshal_ServerTypeCapabilities(field) + else: + args["capabilities"] = None field = data.get("scratch_storage_max_size", None) if field is not None: args["scratch_storage_max_size"] = field + else: + args["scratch_storage_max_size"] = None return ServerType(**args) @@ -2175,10 +2321,14 @@ def 
unmarshal_VolumeType(data: Any) -> VolumeType: field = data.get("capabilities", None) if field is not None: args["capabilities"] = unmarshal_VolumeTypeCapabilities(field) + else: + args["capabilities"] = None field = data.get("constraints", None) if field is not None: args["constraints"] = unmarshal_VolumeTypeConstraints(field) + else: + args["constraints"] = None return VolumeType(**args) @@ -2227,6 +2377,8 @@ def unmarshal_MigrationPlan(data: Any) -> MigrationPlan: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return MigrationPlan(**args) @@ -2242,6 +2394,8 @@ def unmarshal_ServerActionResponse(data: Any) -> ServerActionResponse: field = data.get("task", None) if field is not None: args["task"] = unmarshal_Task(field) + else: + args["task"] = None return ServerActionResponse(**args) @@ -2257,6 +2411,8 @@ def unmarshal_SetPlacementGroupResponse(data: Any) -> SetPlacementGroupResponse: field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return SetPlacementGroupResponse(**args) @@ -2312,6 +2468,8 @@ def unmarshal_UpdateImageResponse(data: Any) -> UpdateImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return UpdateImageResponse(**args) @@ -2327,6 +2485,8 @@ def unmarshal_UpdateIpResponse(data: Any) -> UpdateIpResponse: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_Ip(field) + else: + args["ip"] = None return UpdateIpResponse(**args) @@ -2342,6 +2502,8 @@ def unmarshal_UpdatePlacementGroupResponse(data: Any) -> UpdatePlacementGroupRes field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return UpdatePlacementGroupResponse(**args) @@ -2378,6 +2540,8 @@ def unmarshal_UpdateSecurityGroupResponse(data: Any) -> UpdateSecurityGroupRespo field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return UpdateSecurityGroupResponse(**args) @@ -2395,6 +2559,8 @@ def unmarshal_UpdateSecurityGroupRuleResponse( field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) + else: + args["rule"] = None return UpdateSecurityGroupRuleResponse(**args) @@ -2410,6 +2576,8 @@ def unmarshal_UpdateServerResponse(data: Any) -> UpdateServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return UpdateServerResponse(**args) @@ -2425,6 +2593,8 @@ def unmarshal_UpdateSnapshotResponse(data: Any) -> UpdateSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + args["snapshot"] = None return UpdateSnapshotResponse(**args) @@ -2440,6 +2610,8 @@ def unmarshal_UpdateVolumeResponse(data: Any) -> UpdateVolumeResponse: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return UpdateVolumeResponse(**args) @@ -2455,6 +2627,8 @@ def unmarshal__SetImageResponse(data: Any) -> _SetImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return 
_SetImageResponse(**args) @@ -2470,6 +2644,8 @@ def unmarshal__SetSecurityGroupResponse(data: Any) -> _SetSecurityGroupResponse: field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return _SetSecurityGroupResponse(**args) @@ -2485,6 +2661,8 @@ def unmarshal__SetSecurityGroupRuleResponse(data: Any) -> _SetSecurityGroupRuleR field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) + else: + args["rule"] = None return _SetSecurityGroupRuleResponse(**args) @@ -2500,6 +2678,8 @@ def unmarshal__SetServerResponse(data: Any) -> _SetServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return _SetServerResponse(**args) @@ -2515,6 +2695,8 @@ def unmarshal__SetSnapshotResponse(data: Any) -> _SetSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + args["snapshot"] = None return _SetSnapshotResponse(**args) diff --git a/scaleway-async/scaleway_async/iot/v1/marshalling.py b/scaleway-async/scaleway_async/iot/v1/marshalling.py index 97f75d2c5..0397cfe72 100644 --- a/scaleway-async/scaleway_async/iot/v1/marshalling.py +++ b/scaleway-async/scaleway_async/iot/v1/marshalling.py @@ -75,6 +75,8 @@ def unmarshal_DeviceMessageFiltersRule(data: Any) -> DeviceMessageFiltersRule: field = data.get("topics", None) if field is not None: args["topics"] = field + else: + args["topics"] = None return DeviceMessageFiltersRule(**args) @@ -90,10 +92,14 @@ def unmarshal_DeviceMessageFilters(data: Any) -> DeviceMessageFilters: field = data.get("publish", None) if field is not None: args["publish"] = unmarshal_DeviceMessageFiltersRule(field) + else: + args["publish"] = None field = data.get("subscribe", None) if field is not None: args["subscribe"] = unmarshal_DeviceMessageFiltersRule(field) + else: + args["subscribe"] = None return DeviceMessageFilters(**args) @@ -131,6 +137,8 @@ def unmarshal_Device(data: Any) -> Device: args["last_activity_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_activity_at"] = None field = data.get("is_connected", None) if field is not None: @@ -151,14 +159,20 @@ def unmarshal_Device(data: Any) -> Device: field = data.get("message_filters", None) if field is not None: args["message_filters"] = unmarshal_DeviceMessageFilters(field) + else: + args["message_filters"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Device(**args) @@ -179,7 +193,7 @@ def unmarshal_Network(data: Any) -> Network: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -198,6 +212,8 @@ def unmarshal_Network(data: Any) -> Network: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Network(**args) @@ -288,14 +304,20 @@ def unmarshal_Hub(data: Any) -> Hub: field = data.get("created_at", None) if field is not None: 
args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("twins_graphite_config", None) if field is not None: args["twins_graphite_config"] = unmarshal_HubTwinsGraphiteConfig(field) + else: + args["twins_graphite_config"] = None return Hub(**args) @@ -330,10 +352,14 @@ def unmarshal_CreateDeviceResponse(data: Any) -> CreateDeviceResponse: field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None field = data.get("certificate", None) if field is not None: args["certificate"] = unmarshal_Certificate(field) + else: + args["certificate"] = None return CreateDeviceResponse(**args) @@ -353,6 +379,8 @@ def unmarshal_CreateNetworkResponse(data: Any) -> CreateNetworkResponse: field = data.get("network", None) if field is not None: args["network"] = unmarshal_Network(field) + else: + args["network"] = None return CreateNetworkResponse(**args) @@ -372,6 +400,8 @@ def unmarshal_GetDeviceCertificateResponse(data: Any) -> GetDeviceCertificateRes field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None return GetDeviceCertificateResponse(**args) @@ -510,17 +540,21 @@ def unmarshal_RouteSummary(data: Any) -> RouteSummary: if field is not None: args["topic"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return RouteSummary(**args) @@ -595,10 +629,14 @@ def unmarshal_RenewDeviceCertificateResponse( field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None field = data.get("certificate", None) if field is not None: args["certificate"] = unmarshal_Certificate(field) + else: + args["certificate"] = None return RenewDeviceCertificateResponse(**args) @@ -716,29 +754,39 @@ def unmarshal_Route(data: Any) -> Route: if field is not None: args["topic"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("s3_config", None) if field is not None: args["s3_config"] = unmarshal_RouteS3Config(field) + else: + args["s3_config"] = None field = data.get("db_config", None) if field is not None: args["db_config"] = unmarshal_RouteDatabaseConfig(field) + else: + args["db_config"] = None field = data.get("rest_config", None) if field is not None: args["rest_config"] = unmarshal_RouteRestConfig(field) + else: + args["rest_config"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Route(**args) @@ -758,6 +806,8 @@ def unmarshal_SetDeviceCertificateResponse(data: Any) 
-> SetDeviceCertificateRes field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None return SetDeviceCertificateResponse(**args) @@ -785,6 +835,8 @@ def unmarshal_TwinDocument(data: Any) -> TwinDocument: field = data.get("data", None) if field is not None: args["data"] = field + else: + args["data"] = None return TwinDocument(**args) diff --git a/scaleway-async/scaleway_async/jobs/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/jobs/v1alpha1/marshalling.py index ecb70454a..71b680b67 100644 --- a/scaleway-async/scaleway_async/jobs/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/jobs/v1alpha1/marshalling.py @@ -78,10 +78,14 @@ def unmarshal_JobDefinition(data: Any) -> JobDefinition: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("environment_variables", None) if field is not None: @@ -102,10 +106,14 @@ def unmarshal_JobDefinition(data: Any) -> JobDefinition: field = data.get("job_timeout", None) if field is not None: args["job_timeout"] = field + else: + args["job_timeout"] = None field = data.get("cron_schedule", None) if field is not None: args["cron_schedule"] = unmarshal_CronSchedule(field) + else: + args["cron_schedule"] = None return JobDefinition(**args) @@ -149,24 +157,34 @@ def unmarshal_JobRun(data: Any) -> JobRun: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("exit_code", None) if field is not None: args["exit_code"] = field + else: + args["exit_code"] = None field = data.get("run_duration", None) if field is not None: args["run_duration"] = field + else: + args["run_duration"] = None field = data.get("environment_variables", None) if field is not None: @@ -183,6 +201,8 @@ def unmarshal_JobRun(data: Any) -> JobRun: field = data.get("started_at", None) if field is not None: args["started_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["started_at"] = None return JobRun(**args) diff --git a/scaleway-async/scaleway_async/k8s/v1/marshalling.py b/scaleway-async/scaleway_async/k8s/v1/marshalling.py index 9f324c14b..8f813da49 100644 --- a/scaleway-async/scaleway_async/k8s/v1/marshalling.py +++ b/scaleway-async/scaleway_async/k8s/v1/marshalling.py @@ -114,10 +114,14 @@ def unmarshal_Pool(data: Any) -> Pool: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("min_size", None) if field is not None: @@ -162,14 +166,20 @@ def 
unmarshal_Pool(data: Any) -> Pool: field = data.get("placement_group_id", None) if field is not None: args["placement_group_id"] = field + else: + args["placement_group_id"] = None field = data.get("upgrade_policy", None) if field is not None: args["upgrade_policy"] = unmarshal_PoolUpgradePolicy(field) + else: + args["upgrade_policy"] = None field = data.get("root_volume_size", None) if field is not None: args["root_volume_size"] = field + else: + args["root_volume_size"] = None return Pool(**args) @@ -253,6 +263,8 @@ def unmarshal_ClusterAutoUpgrade(data: Any) -> ClusterAutoUpgrade: field = data.get("maintenance_window", None) if field is not None: args["maintenance_window"] = unmarshal_MaintenanceWindow(field) + else: + args["maintenance_window"] = None return ClusterAutoUpgrade(**args) @@ -359,7 +371,7 @@ def unmarshal_Cluster(data: Any) -> Cluster: if field is not None: args["id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -426,32 +438,52 @@ def unmarshal_Cluster(data: Any) -> Cluster: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("autoscaler_config", None) if field is not None: args["autoscaler_config"] = unmarshal_ClusterAutoscalerConfig(field) + else: + args["autoscaler_config"] = None field = data.get("auto_upgrade", None) if field is not None: args["auto_upgrade"] = unmarshal_ClusterAutoUpgrade(field) + else: + args["auto_upgrade"] = None field = data.get("open_id_connect_config", None) if field is not None: args["open_id_connect_config"] = unmarshal_ClusterOpenIDConnectConfig(field) + else: + args["open_id_connect_config"] = None field = data.get("private_network_id", None) if field is not None: args["private_network_id"] = field + else: + args["private_network_id"] = None field = data.get("commitment_ends_at", None) if field is not None: args["commitment_ends_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["commitment_ends_at"] = None + + field = data.get("routed_ip_enabled", None) + if field is not None: + args["routed_ip_enabled"] = field + else: + args["routed_ip_enabled"] = None field = data.get("routed_ip_enabled", None) if field is not None: @@ -495,14 +527,20 @@ def unmarshal_Node(data: Any) -> Node: field = data.get("public_ip_v4", None) if field is not None: args["public_ip_v4"] = field + else: + args["public_ip_v4"] = None field = data.get("public_ip_v6", None) if field is not None: args["public_ip_v6"] = field + else: + args["public_ip_v6"] = None field = data.get("conditions", None) if field is not None: args["conditions"] = field + else: + args["conditions"] = None field = data.get("status", None) if field is not None: @@ -511,14 +549,20 @@ def unmarshal_Node(data: Any) -> Node: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + 
args["updated_at"] = None return Node(**args) @@ -656,6 +700,8 @@ def unmarshal_ClusterType(data: Any) -> ClusterType: field = data.get("commitment_delay", None) if field is not None: args["commitment_delay"] = field + else: + args["commitment_delay"] = None return ClusterType(**args) diff --git a/scaleway-async/scaleway_async/lb/v1/marshalling.py b/scaleway-async/scaleway_async/lb/v1/marshalling.py index b68ee34b9..f339251e9 100644 --- a/scaleway-async/scaleway_async/lb/v1/marshalling.py +++ b/scaleway-async/scaleway_async/lb/v1/marshalling.py @@ -140,10 +140,14 @@ def unmarshal_Ip(data: Any) -> Ip: field = data.get("lb_id", None) if field is not None: args["lb_id"] = field + else: + args["lb_id"] = None field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return Ip(**args) @@ -197,10 +201,14 @@ def unmarshal_Subscriber(data: Any) -> Subscriber: field = data.get("email_config", None) if field is not None: args["email_config"] = unmarshal_SubscriberEmailConfig(field) + else: + args["email_config"] = None field = data.get("webhook_config", None) if field is not None: args["webhook_config"] = unmarshal_SubscriberWebhookConfig(field) + else: + args["webhook_config"] = None return Subscriber(**args) @@ -228,6 +236,8 @@ def unmarshal_HealthCheckHttpConfig(data: Any) -> HealthCheckHttpConfig: field = data.get("code", None) if field is not None: args["code"] = field + else: + args["code"] = None return HealthCheckHttpConfig(**args) @@ -259,6 +269,8 @@ def unmarshal_HealthCheckHttpsConfig(data: Any) -> HealthCheckHttpsConfig: field = data.get("code", None) if field is not None: args["code"] = field + else: + args["code"] = None return HealthCheckHttpsConfig(**args) @@ -345,18 +357,26 @@ def unmarshal_HealthCheck(data: Any) -> HealthCheck: field = data.get("check_delay", None) if field is not None: args["check_delay"] = field + else: + args["check_delay"] = None field = data.get("check_timeout", None) if field is not None: args["check_timeout"] = field + else: + args["check_timeout"] = None field = data.get("tcp_config", None) if field is not None: args["tcp_config"] = unmarshal_HealthCheckTcpConfig(field) + else: + args["tcp_config"] = None field = data.get("mysql_config", None) if field is not None: args["mysql_config"] = unmarshal_HealthCheckMysqlConfig(field) + else: + args["mysql_config"] = None field = data.get("check_send_proxy", None) if field is not None: @@ -365,26 +385,38 @@ def unmarshal_HealthCheck(data: Any) -> HealthCheck: field = data.get("pgsql_config", None) if field is not None: args["pgsql_config"] = unmarshal_HealthCheckPgsqlConfig(field) + else: + args["pgsql_config"] = None field = data.get("ldap_config", None) if field is not None: args["ldap_config"] = unmarshal_HealthCheckLdapConfig(field) + else: + args["ldap_config"] = None field = data.get("redis_config", None) if field is not None: args["redis_config"] = unmarshal_HealthCheckRedisConfig(field) + else: + args["redis_config"] = None field = data.get("http_config", None) if field is not None: args["http_config"] = unmarshal_HealthCheckHttpConfig(field) + else: + args["http_config"] = None field = data.get("https_config", None) if field is not None: args["https_config"] = unmarshal_HealthCheckHttpsConfig(field) + else: + args["https_config"] = None field = data.get("transient_check_delay", None) if field is not None: args["transient_check_delay"] = field + else: + args["transient_check_delay"] = None return HealthCheck(**args) @@ -416,14 +448,20 @@ def 
unmarshal_Instance(data: Any) -> Instance: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return Instance(**args) @@ -482,7 +520,7 @@ def unmarshal_Lb(data: Any) -> Lb: if field is not None: args["backend_count"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -505,18 +543,26 @@ def unmarshal_Lb(data: Any) -> Lb: field = data.get("subscriber", None) if field is not None: args["subscriber"] = unmarshal_Subscriber(field) + else: + args["subscriber"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return Lb(**args) @@ -572,62 +618,92 @@ def unmarshal_Backend(data: Any) -> Backend: field = data.get("health_check", None) if field is not None: args["health_check"] = unmarshal_HealthCheck(field) + else: + args["health_check"] = None field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("send_proxy_v2", None) if field is not None: args["send_proxy_v2"] = field + else: + args["send_proxy_v2"] = None field = data.get("timeout_server", None) if field is not None: args["timeout_server"] = field + else: + args["timeout_server"] = None field = data.get("timeout_connect", None) if field is not None: args["timeout_connect"] = field + else: + args["timeout_connect"] = None field = data.get("timeout_tunnel", None) if field is not None: args["timeout_tunnel"] = field + else: + args["timeout_tunnel"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("failover_host", None) if field is not None: args["failover_host"] = field + else: + args["failover_host"] = None field = data.get("ssl_bridging", None) if field is not None: args["ssl_bridging"] = field + else: + args["ssl_bridging"] = None field = data.get("ignore_ssl_server_verify", None) if field is not None: args["ignore_ssl_server_verify"] = field + else: + args["ignore_ssl_server_verify"] = None field = data.get("redispatch_attempt_count", None) if field is not None: args["redispatch_attempt_count"] = field + else: + args["redispatch_attempt_count"] = None field = data.get("max_retries", None) if field is not None: args["max_retries"] = field + else: + args["max_retries"] = None field = data.get("max_connections", None) if field is not None: args["max_connections"] = field + else: + args["max_connections"] = None field = data.get("timeout_queue", None) if field is not 
None: args["timeout_queue"] = field + else: + args["timeout_queue"] = None return Backend(**args) @@ -640,7 +716,7 @@ def unmarshal_Certificate(data: Any) -> Certificate: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -669,16 +745,22 @@ def unmarshal_Certificate(data: Any) -> Certificate: args["not_valid_before"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["not_valid_before"] = None field = data.get("not_valid_after", None) if field is not None: args["not_valid_after"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["not_valid_after"] = None field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("name", None) if field is not None: @@ -687,14 +769,20 @@ def unmarshal_Certificate(data: Any) -> Certificate: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("status_details", None) if field is not None: args["status_details"] = field + else: + args["status_details"] = None return Certificate(**args) @@ -730,26 +818,38 @@ def unmarshal_Frontend(data: Any) -> Frontend: field = data.get("backend", None) if field is not None: args["backend"] = unmarshal_Backend(field) + else: + args["backend"] = None field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("timeout_client", None) if field is not None: args["timeout_client"] = field + else: + args["timeout_client"] = None field = data.get("certificate", None) if field is not None: args["certificate"] = unmarshal_Certificate(field) + else: + args["certificate"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Frontend(**args) @@ -762,7 +862,7 @@ def unmarshal_AclActionRedirect(data: Any) -> AclActionRedirect: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -773,6 +873,8 @@ def unmarshal_AclActionRedirect(data: Any) -> AclActionRedirect: field = data.get("code", None) if field is not None: args["code"] = field + else: + args["code"] = None return AclActionRedirect(**args) @@ -785,13 +887,15 @@ def unmarshal_AclAction(data: Any) -> AclAction: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("redirect", None) if field is not None: args["redirect"] = unmarshal_AclActionRedirect(field) + else: + args["redirect"] = None return AclAction(**args) @@ -823,6 +927,8 @@ def unmarshal_AclMatch(data: Any) -> AclMatch: field = data.get("http_filter_option", None) if field is not None: args["http_filter_option"] = field + else: + args["http_filter_option"] = None return AclMatch(**args) @@ -854,22 +960,32 @@ def unmarshal_Acl(data: Any) -> Acl: 
field = data.get("match", None) if field is not None: args["match"] = unmarshal_AclMatch(field) + else: + args["match"] = None field = data.get("action", None) if field is not None: args["action"] = unmarshal_AclAction(field) + else: + args["action"] = None field = data.get("frontend", None) if field is not None: args["frontend"] = unmarshal_Frontend(field) + else: + args["frontend"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Acl(**args) @@ -885,6 +1001,8 @@ def unmarshal_PrivateNetworkDHCPConfig(data: Any) -> PrivateNetworkDHCPConfig: field = data.get("ip_id", None) if field is not None: args["ip_id"] = field + else: + args["ip_id"] = None return PrivateNetworkDHCPConfig(**args) @@ -911,6 +1029,8 @@ def unmarshal_PrivateNetworkStaticConfig(data: Any) -> PrivateNetworkStaticConfi field = data.get("ip_address", None) if field is not None: args["ip_address"] = field + else: + args["ip_address"] = None return PrivateNetworkStaticConfig(**args) @@ -938,26 +1058,38 @@ def unmarshal_PrivateNetwork(data: Any) -> PrivateNetwork: field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("static_config", None) if field is not None: args["static_config"] = unmarshal_PrivateNetworkStaticConfig(field) + else: + args["static_config"] = None field = data.get("dhcp_config", None) if field is not None: args["dhcp_config"] = unmarshal_PrivateNetworkDHCPConfig(field) + else: + args["dhcp_config"] = None field = data.get("ipam_config", None) if field is not None: args["ipam_config"] = unmarshal_PrivateNetworkIpamConfig(field) + else: + args["ipam_config"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return PrivateNetwork(**args) @@ -973,10 +1105,14 @@ def unmarshal_RouteMatch(data: Any) -> RouteMatch: field = data.get("sni", None) if field is not None: args["sni"] = field + else: + args["sni"] = None field = data.get("host_header", None) if field is not None: args["host_header"] = field + else: + args["host_header"] = None return RouteMatch(**args) @@ -1004,14 +1140,20 @@ def unmarshal_Route(data: Any) -> Route: field = data.get("match", None) if field is not None: args["match"] = unmarshal_RouteMatch(field) + else: + args["match"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Route(**args) @@ -1049,6 +1191,8 @@ def unmarshal_BackendServerStats(data: Any) -> BackendServerStats: args["server_state_changed_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["server_state_changed_at"] = None return BackendServerStats(**args) @@ -1244,6 +1388,8 @@ def 
unmarshal_LbType(data: Any) -> LbType: field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return LbType(**args) diff --git a/scaleway-async/scaleway_async/llm_inference/v1beta1/marshalling.py b/scaleway-async/scaleway_async/llm_inference/v1beta1/marshalling.py index e81465983..5fa39a3ad 100644 --- a/scaleway-async/scaleway_async/llm_inference/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/llm_inference/v1beta1/marshalling.py @@ -87,10 +87,14 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("public_access", None) if field is not None: args["public_access"] = unmarshal_EndpointPublicAccessDetails(field) + else: + args["public_access"] = None field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None return Endpoint(**args) @@ -156,14 +160,20 @@ def unmarshal_Deployment(data: Any) -> Deployment: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Deployment(**args) @@ -187,10 +197,14 @@ def unmarshal_ModelS3Model(data: Any) -> ModelS3Model: field = data.get("node_type", None) if field is not None: args["node_type"] = field + else: + args["node_type"] = None field = data.get("triton_server_version", None) if field is not None: args["triton_server_version"] = field + else: + args["triton_server_version"] = None return ModelS3Model(**args) @@ -250,14 +264,20 @@ def unmarshal_Model(data: Any) -> Model: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("s3_model", None) if field is not None: args["s3_model"] = unmarshal_ModelS3Model(field) + else: + args["s3_model"] = None return Model(**args) @@ -433,10 +453,14 @@ def unmarshal_NodeType(data: Any) -> NodeType: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return NodeType(**args) diff --git a/scaleway-async/scaleway_async/marketplace/v2/marshalling.py b/scaleway-async/scaleway_async/marketplace/v2/marshalling.py index eb495e085..c3c22a3e1 100644 --- a/scaleway-async/scaleway_async/marketplace/v2/marshalling.py +++ b/scaleway-async/scaleway_async/marketplace/v2/marshalling.py @@ -74,16 +74,22 @@ def unmarshal_Image(data: Any) -> Image: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = 
parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("valid_until", None) if field is not None: args["valid_until"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["valid_until"] = None return Image(**args) @@ -116,7 +122,7 @@ def unmarshal_LocalImage(data: Any) -> LocalImage: if field is not None: args["label"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -142,16 +148,22 @@ def unmarshal_Version(data: Any) -> Version: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("published_at", None) if field is not None: args["published_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["published_at"] = None return Version(**args) diff --git a/scaleway-async/scaleway_async/mnq/v1beta1/marshalling.py b/scaleway-async/scaleway_async/mnq/v1beta1/marshalling.py index 1ffebec45..30fb4cffe 100644 --- a/scaleway-async/scaleway_async/mnq/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/mnq/v1beta1/marshalling.py @@ -64,10 +64,14 @@ def unmarshal_NatsAccount(data: Any) -> NatsAccount: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return NatsAccount(**args) @@ -118,14 +122,20 @@ def unmarshal_NatsCredentials(data: Any) -> NatsCredentials: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("credentials", None) if field is not None: args["credentials"] = unmarshal_File(field) + else: + args["credentials"] = None return NatsCredentials(**args) @@ -141,14 +151,20 @@ def unmarshal_SnsPermissions(data: Any) -> SnsPermissions: field = data.get("can_publish", None) if field is not None: args["can_publish"] = field + else: + args["can_publish"] = None field = data.get("can_receive", None) if field is not None: args["can_receive"] = field + else: + args["can_receive"] = None field = data.get("can_manage", None) if field is not None: args["can_manage"] = field + else: + args["can_manage"] = None return SnsPermissions(**args) @@ -192,14 +208,20 @@ def unmarshal_SnsCredentials(data: Any) -> SnsCredentials: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("permissions", None) if field is not None: args["permissions"] = unmarshal_SnsPermissions(field) + else: + 
args["permissions"] = None return SnsCredentials(**args) @@ -215,14 +237,20 @@ def unmarshal_SqsPermissions(data: Any) -> SqsPermissions: field = data.get("can_publish", None) if field is not None: args["can_publish"] = field + else: + args["can_publish"] = None field = data.get("can_receive", None) if field is not None: args["can_receive"] = field + else: + args["can_receive"] = None field = data.get("can_manage", None) if field is not None: args["can_manage"] = field + else: + args["can_manage"] = None return SqsPermissions(**args) @@ -266,14 +294,20 @@ def unmarshal_SqsCredentials(data: Any) -> SqsCredentials: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("permissions", None) if field is not None: args["permissions"] = unmarshal_SqsPermissions(field) + else: + args["permissions"] = None return SqsCredentials(**args) @@ -389,10 +423,14 @@ def unmarshal_SnsInfo(data: Any) -> SnsInfo: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return SnsInfo(**args) @@ -424,10 +462,14 @@ def unmarshal_SqsInfo(data: Any) -> SqsInfo: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return SqsInfo(**args) diff --git a/scaleway-async/scaleway_async/rdb/v1/marshalling.py b/scaleway-async/scaleway_async/rdb/v1/marshalling.py index 4d2440648..0401c6c32 100644 --- a/scaleway-async/scaleway_async/rdb/v1/marshalling.py +++ b/scaleway-async/scaleway_async/rdb/v1/marshalling.py @@ -166,26 +166,38 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("ip", None) if field is not None: args["ip"] = field + else: + args["ip"] = None field = data.get("name", None) if field is not None: args["name"] = field + else: + args["name"] = None field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None field = data.get("load_balancer", None) if field is not None: args["load_balancer"] = unmarshal_EndpointLoadBalancerDetails(field) + else: + args["load_balancer"] = None field = data.get("direct_access", None) if field is not None: args["direct_access"] = unmarshal_EndpointDirectAccessDetails(field) + else: + args["direct_access"] = None field = data.get("hostname", None) if field is not None: args["hostname"] = field + else: + args["hostname"] = None return Endpoint(**args) @@ -209,18 +221,26 @@ def unmarshal_Maintenance(data: Any) -> Maintenance: field = data.get("starts_at", None) if field is not None: args["starts_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["starts_at"] = None field = data.get("stops_at", None) if field is not None: 
args["stops_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stops_at"] = None field = data.get("closed_at", None) if field is not None: args["closed_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["closed_at"] = None field = data.get("forced_at", None) if field is not None: args["forced_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["forced_at"] = None return Maintenance(**args) @@ -293,18 +313,26 @@ def unmarshal_DatabaseBackup(data: Any) -> DatabaseBackup: field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("instance_name", None) if field is not None: @@ -321,12 +349,16 @@ def unmarshal_DatabaseBackup(data: Any) -> DatabaseBackup: field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = data.get("download_url_expires_at", None) if field is not None: args["download_url_expires_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["download_url_expires_at"] = None return DatabaseBackup(**args) @@ -385,14 +417,20 @@ def unmarshal_InstanceLog(data: Any) -> InstanceLog: field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return InstanceLog(**args) @@ -422,6 +460,8 @@ def unmarshal_BackupSchedule(data: Any) -> BackupSchedule: args["next_run_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["next_run_at"] = None return BackupSchedule(**args) @@ -456,10 +496,14 @@ def unmarshal_LogsPolicy(data: Any) -> LogsPolicy: field = data.get("max_age_retention", None) if field is not None: args["max_age_retention"] = field + else: + args["max_age_retention"] = None field = data.get("total_disk_retention", None) if field is not None: args["total_disk_retention"] = field + else: + args["total_disk_retention"] = None return LogsPolicy(**args) @@ -499,7 +543,7 @@ def unmarshal_Volume(data: Any) -> Volume: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -507,7 +551,7 @@ def unmarshal_Volume(data: Any) -> Volume: if field is not None: args["size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -541,10 +585,14 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else 
field + else: + args["created_at"] = None field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None field = data.get("project_id", None) if field is not None: @@ -583,10 +631,14 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("endpoint", None) if field is not None: args["endpoint"] = unmarshal_Endpoint(field) + else: + args["endpoint"] = None field = data.get("backup_schedule", None) if field is not None: args["backup_schedule"] = unmarshal_BackupSchedule(field) + else: + args["backup_schedule"] = None field = data.get("read_replicas", None) if field is not None: @@ -623,6 +675,8 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("logs_policy", None) if field is not None: args["logs_policy"] = unmarshal_LogsPolicy(field) + else: + args["logs_policy"] = None return Instance(**args) @@ -658,11 +712,11 @@ def unmarshal_SnapshotVolumeType(data: Any) -> SnapshotVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -708,22 +762,32 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("volume_type", None) if field is not None: args["volume_type"] = unmarshal_SnapshotVolumeType(field) + else: + args["volume_type"] = None return Snapshot(**args) @@ -778,6 +842,8 @@ def unmarshal_ACLRule(data: Any) -> ACLRule: field = data.get("port", None) if field is not None: args["port"] = field + else: + args["port"] = None return ACLRule(**args) @@ -923,26 +989,38 @@ def unmarshal_EngineSetting(data: Any) -> EngineSetting: field = data.get("unit", None) if field is not None: args["unit"] = field + else: + args["unit"] = None field = data.get("string_constraint", None) if field is not None: args["string_constraint"] = field + else: + args["string_constraint"] = None field = data.get("int_min", None) if field is not None: args["int_min"] = field + else: + args["int_min"] = None field = data.get("int_max", None) if field is not None: args["int_max"] = field + else: + args["int_max"] = None field = data.get("float_min", None) if field is not None: args["float_min"] = field + else: + args["float_min"] = None field = data.get("float_max", None) if field is not None: args["float_max"] = field + else: + args["float_max"] = None return EngineSetting(**args) @@ -988,6 +1066,8 @@ def unmarshal_EngineVersion(data: Any) -> EngineVersion: args["end_of_life"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["end_of_life"] = None return EngineVersion(**args) @@ -1194,7 +1274,7 @@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is 
not None: args["type_"] = field @@ -1214,7 +1294,7 @@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: if field is not None: args["chunk_size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -1260,10 +1340,14 @@ def unmarshal_NodeType(data: Any) -> NodeType: field = data.get("volume_constraint", None) if field is not None: args["volume_constraint"] = unmarshal_NodeTypeVolumeConstraintSizes(field) + else: + args["volume_constraint"] = None field = data.get("is_bssd_compatible", None) if field is not None: args["is_bssd_compatible"] = field + else: + args["is_bssd_compatible"] = None field = data.get("available_volume_types", None) if field is not None: diff --git a/scaleway-async/scaleway_async/redis/v1/marshalling.py b/scaleway-async/scaleway_async/redis/v1/marshalling.py index f72dabcb0..0db4ab1a6 100644 --- a/scaleway-async/scaleway_async/redis/v1/marshalling.py +++ b/scaleway-async/scaleway_async/redis/v1/marshalling.py @@ -64,10 +64,14 @@ def unmarshal_ACLRule(data: Any) -> ACLRule: field = data.get("ip_cidr", None) if field is not None: args["ip_cidr"] = field + else: + args["ip_cidr"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return ACLRule(**args) @@ -133,10 +137,14 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_PrivateNetwork(field) + else: + args["private_network"] = None field = data.get("public_network", None) if field is not None: args["public_network"] = unmarshal_PublicNetwork(field) + else: + args["public_network"] = None return Endpoint(**args) @@ -215,10 +223,14 @@ def unmarshal_Cluster(data: Any) -> Cluster: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("acl_rules", None) if field is not None: @@ -333,7 +345,7 @@ def unmarshal_AvailableClusterSetting(data: Any) -> AvailableClusterSetting: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -348,18 +360,26 @@ def unmarshal_AvailableClusterSetting(data: Any) -> AvailableClusterSetting: field = data.get("default_value", None) if field is not None: args["default_value"] = field + else: + args["default_value"] = None field = data.get("max_value", None) if field is not None: args["max_value"] = field + else: + args["max_value"] = None field = data.get("min_value", None) if field is not None: args["min_value"] = field + else: + args["min_value"] = None field = data.get("regex", None) if field is not None: args["regex"] = field + else: + args["regex"] = None return AvailableClusterSetting(**args) @@ -397,6 +417,8 @@ def unmarshal_ClusterVersion(data: Any) -> ClusterVersion: args["end_of_life_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["end_of_life_at"] = None return ClusterVersion(**args) diff --git a/scaleway-async/scaleway_async/registry/v1/marshalling.py b/scaleway-async/scaleway_async/registry/v1/marshalling.py index d720c1379..1ffa9c7f1 100644 --- 
a/scaleway-async/scaleway_async/registry/v1/marshalling.py +++ b/scaleway-async/scaleway_async/registry/v1/marshalling.py @@ -61,14 +61,20 @@ def unmarshal_Image(data: Any) -> Image: field = data.get("status_message", None) if field is not None: args["status_message"] = field + else: + args["status_message"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Image(**args) @@ -132,10 +138,14 @@ def unmarshal_Namespace(data: Any) -> Namespace: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Namespace(**args) @@ -171,10 +181,14 @@ def unmarshal_Tag(data: Any) -> Tag: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Tag(**args) diff --git a/scaleway-async/scaleway_async/secret/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/secret/v1alpha1/marshalling.py index 6c6efd10f..34a6622c3 100644 --- a/scaleway-async/scaleway_async/secret/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/secret/v1alpha1/marshalling.py @@ -58,6 +58,8 @@ def unmarshal_Folder(data: Any) -> Folder: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Folder(**args) @@ -77,10 +79,14 @@ def unmarshal_EphemeralProperties(data: Any) -> EphemeralProperties: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralProperties(**args) @@ -112,18 +118,26 @@ def unmarshal_SecretVersion(data: Any) -> SecretVersion: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_properties", None) if field is not None: args["ephemeral_properties"] = unmarshal_EphemeralProperties(field) + else: + args["ephemeral_properties"] = None return SecretVersion(**args) @@ -143,10 +157,14 @@ def unmarshal_EphemeralPolicy(data: Any) -> EphemeralPolicy: field = data.get("time_to_live", None) if field is not None: args["time_to_live"] = field + else: + 
args["time_to_live"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralPolicy(**args) @@ -178,10 +196,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("tags", None) if field is not None: @@ -199,7 +221,7 @@ def unmarshal_Secret(data: Any) -> Secret: if field is not None: args["is_protected"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -214,10 +236,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_policy", None) if field is not None: args["ephemeral_policy"] = unmarshal_EphemeralPolicy(field) + else: + args["ephemeral_policy"] = None return Secret(**args) @@ -245,6 +271,8 @@ def unmarshal_AccessSecretVersionResponse(data: Any) -> AccessSecretVersionRespo field = data.get("data_crc32", None) if field is not None: args["data_crc32"] = field + else: + args["data_crc32"] = None return AccessSecretVersionResponse(**args) diff --git a/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py b/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py index 3dfc237a4..a2bb8fb59 100644 --- a/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py +++ b/scaleway-async/scaleway_async/secret/v1beta1/marshalling.py @@ -41,10 +41,14 @@ def unmarshal_EphemeralProperties(data: Any) -> EphemeralProperties: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralProperties(**args) @@ -76,18 +80,26 @@ def unmarshal_SecretVersion(data: Any) -> SecretVersion: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_properties", None) if field is not None: args["ephemeral_properties"] = unmarshal_EphemeralProperties(field) + else: + args["ephemeral_properties"] = None return SecretVersion(**args) @@ -107,10 +119,14 @@ def unmarshal_EphemeralPolicy(data: Any) -> EphemeralPolicy: field = data.get("time_to_live", None) if field is not None: args["time_to_live"] = field + else: + args["time_to_live"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralPolicy(**args) @@ -142,10 +158,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = 
data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("tags", None) if field is not None: @@ -163,7 +183,7 @@ def unmarshal_Secret(data: Any) -> Secret: if field is not None: args["protected"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -178,10 +198,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_policy", None) if field is not None: args["ephemeral_policy"] = unmarshal_EphemeralPolicy(field) + else: + args["ephemeral_policy"] = None return Secret(**args) @@ -206,13 +230,15 @@ def unmarshal_AccessSecretVersionResponse(data: Any) -> AccessSecretVersionRespo if field is not None: args["data"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("data_crc32", None) if field is not None: args["data_crc32"] = field + else: + args["data_crc32"] = None return AccessSecretVersionResponse(**args) @@ -259,6 +285,8 @@ def unmarshal_BrowseSecretsResponseItemSecretDetails( field = data.get("ephemeral_policy", None) if field is not None: args["ephemeral_policy"] = unmarshal_EphemeralPolicy(field) + else: + args["ephemeral_policy"] = None return BrowseSecretsResponseItemSecretDetails(**args) @@ -278,18 +306,26 @@ def unmarshal_BrowseSecretsResponseItem(data: Any) -> BrowseSecretsResponseItem: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("secret", None) if field is not None: args["secret"] = unmarshal_BrowseSecretsResponseItemSecretDetails(field) + else: + args["secret"] = None field = data.get("folder", None) if field is not None: args["folder"] = unmarshal_BrowseSecretsResponseItemFolderDetails(field) + else: + args["folder"] = None return BrowseSecretsResponseItem(**args) diff --git a/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/marshalling.py index 88f441a68..f250a5316 100644 --- a/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/serverless_sqldb/v1alpha1/marshalling.py @@ -51,24 +51,34 @@ def unmarshal_DatabaseBackup(data: Any) -> DatabaseBackup: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = 
data.get("download_url_expires_at", None) if field is not None: args["download_url_expires_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["download_url_expires_at"] = None return DatabaseBackup(**args) @@ -132,6 +142,8 @@ def unmarshal_Database(data: Any) -> Database: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Database(**args) diff --git a/scaleway-async/scaleway_async/tem/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/tem/v1alpha1/marshalling.py index 5d15e237b..f35ef7308 100644 --- a/scaleway-async/scaleway_async/tem/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/tem/v1alpha1/marshalling.py @@ -53,6 +53,8 @@ def unmarshal_EmailTry(data: Any) -> EmailTry: field = data.get("tried_at", None) if field is not None: args["tried_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["tried_at"] = None return EmailTry(**args) @@ -88,6 +90,8 @@ def unmarshal_Email(data: Any) -> Email: field = data.get("rcpt_to", None) if field is not None: args["rcpt_to"] = field + else: + args["rcpt_to"] = None field = data.get("rcpt_type", None) if field is not None: @@ -118,14 +122,20 @@ def unmarshal_Email(data: Any) -> Email: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("status_details", None) if field is not None: args["status_details"] = field + else: + args["status_details"] = None return Email(**args) @@ -160,6 +170,8 @@ def unmarshal_DomainRecords(data: Any) -> DomainRecords: field = data.get("dmarc", None) if field is not None: args["dmarc"] = unmarshal_DomainRecordsDMARC(field) + else: + args["dmarc"] = None return DomainRecords(**args) @@ -183,16 +195,22 @@ def unmarshal_DomainReputation(data: Any) -> DomainReputation: field = data.get("scored_at", None) if field is not None: args["scored_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["scored_at"] = None field = data.get("previous_score", None) if field is not None: args["previous_score"] = field + else: + args["previous_score"] = None field = data.get("previous_scored_at", None) if field is not None: args["previous_scored_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["previous_scored_at"] = None return DomainReputation(**args) @@ -259,18 +277,24 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("next_check_at", None) if field is not None: args["next_check_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["next_check_at"] = None field = data.get("last_valid_at", None) if field is not None: args["last_valid_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_valid_at"] = None field = data.get("dkim_config", None) if field is not None: @@ -283,22 +307,32 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("revoked_at", None) if field is not None: args["revoked_at"] = 
parser.isoparse(field) if isinstance(field, str) else field + else: + args["revoked_at"] = None field = data.get("last_error", None) if field is not None: args["last_error"] = field + else: + args["last_error"] = None field = data.get("statistics", None) if field is not None: args["statistics"] = unmarshal_DomainStatistics(field) + else: + args["statistics"] = None field = data.get("reputation", None) if field is not None: args["reputation"] = unmarshal_DomainReputation(field) + else: + args["reputation"] = None field = data.get("records", None) if field is not None: args["records"] = unmarshal_DomainRecords(field) + else: + args["records"] = None return Domain(**args) @@ -337,10 +371,14 @@ def unmarshal_DomainLastStatusDkimRecord(data: Any) -> DomainLastStatusDkimRecor args["last_valid_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_valid_at"] = None field = data.get("error", None) if field is not None: args["error"] = field + else: + args["error"] = None return DomainLastStatusDkimRecord(**args) @@ -362,10 +400,14 @@ def unmarshal_DomainLastStatusDmarcRecord(data: Any) -> DomainLastStatusDmarcRec args["last_valid_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_valid_at"] = None field = data.get("error", None) if field is not None: args["error"] = field + else: + args["error"] = None return DomainLastStatusDmarcRecord(**args) @@ -387,10 +429,14 @@ def unmarshal_DomainLastStatusSpfRecord(data: Any) -> DomainLastStatusSpfRecord: args["last_valid_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_valid_at"] = None field = data.get("error", None) if field is not None: args["error"] = field + else: + args["error"] = None return DomainLastStatusSpfRecord(**args) @@ -414,14 +460,20 @@ def unmarshal_DomainLastStatus(data: Any) -> DomainLastStatus: field = data.get("spf_record", None) if field is not None: args["spf_record"] = unmarshal_DomainLastStatusSpfRecord(field) + else: + args["spf_record"] = None field = data.get("dkim_record", None) if field is not None: args["dkim_record"] = unmarshal_DomainLastStatusDkimRecord(field) + else: + args["dkim_record"] = None field = data.get("dmarc_record", None) if field is not None: args["dmarc_record"] = unmarshal_DomainLastStatusDmarcRecord(field) + else: + args["dmarc_record"] = None return DomainLastStatus(**args) diff --git a/scaleway-async/scaleway_async/test/v1/marshalling.py b/scaleway-async/scaleway_async/test/v1/marshalling.py index 357241bf4..6c3c3e84c 100644 --- a/scaleway-async/scaleway_async/test/v1/marshalling.py +++ b/scaleway-async/scaleway_async/test/v1/marshalling.py @@ -58,10 +58,14 @@ def unmarshal_Human(data: Any) -> Human: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("hair_count", None) if field is not None: diff --git a/scaleway-async/scaleway_async/vpc/v1/marshalling.py b/scaleway-async/scaleway_async/vpc/v1/marshalling.py index 32514e39b..b67881a31 100644 --- a/scaleway-async/scaleway_async/vpc/v1/marshalling.py +++ b/scaleway-async/scaleway_async/vpc/v1/marshalling.py @@ -52,10 +52,14 @@ def unmarshal_PrivateNetwork(data: Any) -> PrivateNetwork: field = data.get("created_at", None) if 
field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return PrivateNetwork(**args) diff --git a/scaleway-async/scaleway_async/vpc/v2/marshalling.py b/scaleway-async/scaleway_async/vpc/v2/marshalling.py index 760ec6923..fbbc5561f 100644 --- a/scaleway-async/scaleway_async/vpc/v2/marshalling.py +++ b/scaleway-async/scaleway_async/vpc/v2/marshalling.py @@ -48,10 +48,14 @@ def unmarshal_Subnet(data: Any) -> Subnet: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Subnet(**args) @@ -105,10 +109,14 @@ def unmarshal_PrivateNetwork(data: Any) -> PrivateNetwork: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return PrivateNetwork(**args) @@ -160,10 +168,14 @@ def unmarshal_VPC(data: Any) -> VPC: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return VPC(**args) diff --git a/scaleway-async/scaleway_async/vpcgw/v1/marshalling.py b/scaleway-async/scaleway_async/vpcgw/v1/marshalling.py index 80c04c1c8..a30639886 100644 --- a/scaleway-async/scaleway_async/vpcgw/v1/marshalling.py +++ b/scaleway-async/scaleway_async/vpcgw/v1/marshalling.py @@ -83,10 +83,14 @@ def unmarshal_DHCP(data: Any) -> DHCP: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("pool_high", None) if field is not None: @@ -123,14 +127,20 @@ def unmarshal_DHCP(data: Any) -> DHCP: field = data.get("valid_lifetime", None) if field is not None: args["valid_lifetime"] = field + else: + args["valid_lifetime"] = None field = data.get("renew_timer", None) if field is not None: args["renew_timer"] = field + else: + args["renew_timer"] = None field = data.get("rebind_timer", None) if field is not None: args["rebind_timer"] = field + else: + args["rebind_timer"] = None return DHCP(**args) @@ -177,14 +187,20 @@ def unmarshal_GatewayNetwork(data: Any) -> GatewayNetwork: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + 
else: + args["updated_at"] = None field = data.get("mac_address", None) if field is not None: args["mac_address"] = field + else: + args["mac_address"] = None field = data.get("enable_masquerade", None) if field is not None: @@ -205,14 +221,20 @@ def unmarshal_GatewayNetwork(data: Any) -> GatewayNetwork: field = data.get("dhcp", None) if field is not None: args["dhcp"] = unmarshal_DHCP(field) + else: + args["dhcp"] = None field = data.get("address", None) if field is not None: args["address"] = field + else: + args["address"] = None field = data.get("ipam_config", None) if field is not None: args["ipam_config"] = unmarshal_IpamConfig(field) + else: + args["ipam_config"] = None return GatewayNetwork(**args) @@ -252,18 +274,26 @@ def unmarshal_IP(data: Any) -> IP: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("reverse", None) if field is not None: args["reverse"] = field + else: + args["reverse"] = None field = data.get("gateway_id", None) if field is not None: args["gateway_id"] = field + else: + args["gateway_id"] = None return IP(**args) @@ -296,7 +326,7 @@ def unmarshal_DHCPEntry(data: Any) -> DHCPEntry: if field is not None: args["hostname"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -307,10 +337,14 @@ def unmarshal_DHCPEntry(data: Any) -> DHCPEntry: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return DHCPEntry(**args) @@ -361,14 +395,20 @@ def unmarshal_Gateway(data: Any) -> Gateway: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = unmarshal_GatewayType(field) + else: + args["type_"] = None field = data.get("status", None) if field is not None: @@ -399,14 +439,20 @@ def unmarshal_Gateway(data: Any) -> Gateway: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_IP(field) + else: + args["ip"] = None field = data.get("version", None) if field is not None: args["version"] = field + else: + args["version"] = None field = data.get("can_upgrade_to", None) if field is not None: args["can_upgrade_to"] = field + else: + args["can_upgrade_to"] = None field = data.get("bastion_port", None) if field is not None: @@ -470,10 +516,14 @@ def unmarshal_PATRule(data: Any) -> PATRule: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) 
else field + else: + args["updated_at"] = None return PATRule(**args) diff --git a/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py index e6f39d9a3..7329b7b2e 100644 --- a/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py +++ b/scaleway-async/scaleway_async/webhosting/v1alpha1/marshalling.py @@ -113,14 +113,20 @@ def unmarshal_Hosting(data: Any) -> Hosting: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("platform_number", None) if field is not None: args["platform_number"] = field + else: + args["platform_number"] = None field = data.get("options", None) if field is not None: @@ -171,6 +177,8 @@ def unmarshal_Hosting(data: Any) -> Hosting: field = data.get("cpanel_urls", None) if field is not None: args["cpanel_urls"] = unmarshal_HostingCpanelUrls(field) + else: + args["cpanel_urls"] = None return Hosting(**args) @@ -187,7 +195,7 @@ def unmarshal_DnsRecord(data: Any) -> DnsRecord: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -206,6 +214,8 @@ def unmarshal_DnsRecord(data: Any) -> DnsRecord: field = data.get("priority", None) if field is not None: args["priority"] = field + else: + args["priority"] = None return DnsRecord(**args) @@ -413,10 +423,14 @@ def unmarshal_Offer(data: Any) -> Offer: field = data.get("product", None) if field is not None: args["product"] = unmarshal_OfferProduct(field) + else: + args["product"] = None field = data.get("price", None) if field is not None: args["price"] = unmarshal_Money(field) + else: + args["price"] = None return Offer(**args) diff --git a/scaleway/scaleway/account/v2/marshalling.py b/scaleway/scaleway/account/v2/marshalling.py index 869668e9d..c577c2d2d 100644 --- a/scaleway/scaleway/account/v2/marshalling.py +++ b/scaleway/scaleway/account/v2/marshalling.py @@ -40,10 +40,14 @@ def unmarshal_Project(data: Any) -> Project: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Project(**args) diff --git a/scaleway/scaleway/account/v3/marshalling.py b/scaleway/scaleway/account/v3/marshalling.py index 2725094df..adf14cd3a 100644 --- a/scaleway/scaleway/account/v3/marshalling.py +++ b/scaleway/scaleway/account/v3/marshalling.py @@ -40,10 +40,14 @@ def unmarshal_Project(data: Any) -> Project: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Project(**args) diff --git a/scaleway/scaleway/applesilicon/v1alpha1/marshalling.py b/scaleway/scaleway/applesilicon/v1alpha1/marshalling.py index 4524d1c52..9d92c06d2 100644 --- 
a/scaleway/scaleway/applesilicon/v1alpha1/marshalling.py +++ b/scaleway/scaleway/applesilicon/v1alpha1/marshalling.py @@ -82,7 +82,7 @@ def unmarshal_ServerTypeDisk(data: Any) -> ServerTypeDisk: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -101,7 +101,7 @@ def unmarshal_ServerTypeMemory(data: Any) -> ServerTypeMemory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -127,18 +127,26 @@ def unmarshal_ServerType(data: Any) -> ServerType: field = data.get("cpu", None) if field is not None: args["cpu"] = unmarshal_ServerTypeCPU(field) + else: + args["cpu"] = None field = data.get("disk", None) if field is not None: args["disk"] = unmarshal_ServerTypeDisk(field) + else: + args["disk"] = None field = data.get("memory", None) if field is not None: args["memory"] = unmarshal_ServerTypeMemory(field) + else: + args["memory"] = None field = data.get("minimum_lease_duration", None) if field is not None: args["minimum_lease_duration"] = field + else: + args["minimum_lease_duration"] = None return ServerType(**args) @@ -155,7 +163,7 @@ def unmarshal_Server(data: Any) -> Server: if field is not None: args["id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -190,16 +198,22 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("deletable_at", None) if field is not None: args["deletable_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["deletable_at"] = None return Server(**args) diff --git a/scaleway/scaleway/baremetal/v1/marshalling.py b/scaleway/scaleway/baremetal/v1/marshalling.py index 60471d4da..dc637d6ca 100644 --- a/scaleway/scaleway/baremetal/v1/marshalling.py +++ b/scaleway/scaleway/baremetal/v1/marshalling.py @@ -111,6 +111,8 @@ def unmarshal_OSOSField(data: Any) -> OSOSField: field = data.get("default_value", None) if field is not None: args["default_value"] = field + else: + args["default_value"] = None return OSOSField(**args) @@ -154,22 +156,32 @@ def unmarshal_OS(data: Any) -> OS: field = data.get("ssh", None) if field is not None: args["ssh"] = unmarshal_OSOSField(field) + else: + args["ssh"] = None field = data.get("user", None) if field is not None: args["user"] = unmarshal_OSOSField(field) + else: + args["user"] = None field = data.get("password", None) if field is not None: args["password"] = unmarshal_OSOSField(field) + else: + args["password"] = None field = data.get("service_user", None) if field is not None: args["service_user"] = unmarshal_OSOSField(field) + else: + args["service_user"] = None field = data.get("service_password", None) if field is not None: args["service_password"] = unmarshal_OSOSField(field) + else: + args["service_password"] = None return OS(**args) @@ -217,7 +229,7 @@ def unmarshal_Disk(data: Any) -> Disk: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -236,7 
+248,7 @@ def unmarshal_Memory(data: Any) -> Memory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -282,10 +294,14 @@ def unmarshal_OfferOptionOffer(data: Any) -> OfferOptionOffer: field = data.get("price", None) if field is not None: args["price"] = unmarshal_Money(field) + else: + args["price"] = None field = data.get("os_id", None) if field is not None: args["os_id"] = field + else: + args["os_id"] = None return OfferOptionOffer(**args) @@ -302,7 +318,7 @@ def unmarshal_PersistentMemory(data: Any) -> PersistentMemory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -373,10 +389,14 @@ def unmarshal_Offer(data: Any) -> Offer: field = data.get("price_per_hour", None) if field is not None: args["price_per_hour"] = unmarshal_Money(field) + else: + args["price_per_hour"] = None field = data.get("price_per_month", None) if field is not None: args["price_per_month"] = unmarshal_Money(field) + else: + args["price_per_month"] = None field = data.get("cpus", None) if field is not None: @@ -441,6 +461,8 @@ def unmarshal_Offer(data: Any) -> Offer: field = data.get("fee", None) if field is not None: args["fee"] = unmarshal_Money(field) + else: + args["fee"] = None return Offer(**args) @@ -499,14 +521,20 @@ def unmarshal_ServerPrivateNetwork(data: Any) -> ServerPrivateNetwork: field = data.get("vlan", None) if field is not None: args["vlan"] = field + else: + args["vlan"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return ServerPrivateNetwork(**args) @@ -577,6 +605,8 @@ def unmarshal_ServerOption(data: Any) -> ServerOption: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return ServerOption(**args) @@ -635,10 +665,14 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("offer_id", None) if field is not None: @@ -681,10 +715,14 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("install", None) if field is not None: args["install"] = unmarshal_ServerInstall(field) + else: + args["install"] = None field = data.get("rescue_server", None) if field is not None: args["rescue_server"] = unmarshal_ServerRescueServer(field) + else: + args["rescue_server"] = None return Server(**args) @@ -701,7 +739,7 @@ def unmarshal_Setting(data: Any) -> Setting: if field is not None: args["id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -739,6 +777,8 @@ def unmarshal_BMCAccess(data: Any) -> BMCAccess: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, 
str) else field + else: + args["expires_at"] = None return BMCAccess(**args) @@ -754,6 +794,8 @@ def unmarshal_GetServerMetricsResponse(data: Any) -> GetServerMetricsResponse: field = data.get("pings", None) if field is not None: args["pings"] = unmarshal_TimeSeries(field) + else: + args["pings"] = None return GetServerMetricsResponse(**args) @@ -838,10 +880,14 @@ def unmarshal_ServerEvent(data: Any) -> ServerEvent: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return ServerEvent(**args) diff --git a/scaleway/scaleway/billing/v2alpha1/marshalling.py b/scaleway/scaleway/billing/v2alpha1/marshalling.py index 351e16f24..3fe90b663 100644 --- a/scaleway/scaleway/billing/v2alpha1/marshalling.py +++ b/scaleway/scaleway/billing/v2alpha1/marshalling.py @@ -48,6 +48,8 @@ def unmarshal_GetConsumptionResponseConsumption( field = data.get("value", None) if field is not None: args["value"] = unmarshal_Money(field) + else: + args["value"] = None return GetConsumptionResponseConsumption(**args) @@ -71,6 +73,8 @@ def unmarshal_GetConsumptionResponse(data: Any) -> GetConsumptionResponse: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return GetConsumptionResponse(**args) @@ -86,6 +90,8 @@ def unmarshal_DiscountCoupon(data: Any) -> DiscountCoupon: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return DiscountCoupon(**args) @@ -98,7 +104,7 @@ def unmarshal_DiscountFilter(data: Any) -> DiscountFilter: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -156,18 +162,26 @@ def unmarshal_Discount(data: Any) -> Discount: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("stop_date", None) if field is not None: args["stop_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stop_date"] = None field = data.get("coupon", None) if field is not None: args["coupon"] = unmarshal_DiscountCoupon(field) + else: + args["coupon"] = None return Discount(**args) @@ -216,24 +230,34 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("issued_date", None) if field is not None: args["issued_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["issued_date"] = None field = data.get("due_date", None) if field is not None: args["due_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["due_date"] = None field = data.get("total_untaxed", None) if field is not None: args["total_untaxed"] = unmarshal_Money(field) + else: + args["total_untaxed"] = None field = data.get("total_taxed", None) if field is 
not None: args["total_taxed"] = unmarshal_Money(field) + else: + args["total_taxed"] = None return Invoice(**args) diff --git a/scaleway/scaleway/billing/v2beta1/marshalling.py b/scaleway/scaleway/billing/v2beta1/marshalling.py index 48328d555..69debc7a9 100644 --- a/scaleway/scaleway/billing/v2beta1/marshalling.py +++ b/scaleway/scaleway/billing/v2beta1/marshalling.py @@ -44,28 +44,38 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("stop_date", None) if field is not None: args["stop_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stop_date"] = None field = data.get("billing_period", None) if field is not None: args["billing_period"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["billing_period"] = None field = data.get("issued_date", None) if field is not None: args["issued_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["issued_date"] = None field = data.get("due_date", None) if field is not None: args["due_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["due_date"] = None - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -84,22 +94,32 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("total_untaxed", None) if field is not None: args["total_untaxed"] = unmarshal_Money(field) + else: + args["total_untaxed"] = None field = data.get("total_taxed", None) if field is not None: args["total_taxed"] = unmarshal_Money(field) + else: + args["total_taxed"] = None field = data.get("total_tax", None) if field is not None: args["total_tax"] = unmarshal_Money(field) + else: + args["total_tax"] = None field = data.get("total_discount", None) if field is not None: args["total_discount"] = unmarshal_Money(field) + else: + args["total_discount"] = None field = data.get("total_undiscount", None) if field is not None: args["total_undiscount"] = unmarshal_Money(field) + else: + args["total_undiscount"] = None return Invoice(**args) @@ -145,6 +165,8 @@ def unmarshal_ListConsumptionsResponseConsumption( field = data.get("value", None) if field is not None: args["value"] = unmarshal_Money(field) + else: + args["value"] = None return ListConsumptionsResponseConsumption(**args) @@ -176,6 +198,8 @@ def unmarshal_ListConsumptionsResponse(data: Any) -> ListConsumptionsResponse: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return ListConsumptionsResponse(**args) @@ -191,6 +215,8 @@ def unmarshal_DiscountCoupon(data: Any) -> DiscountCoupon: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return DiscountCoupon(**args) @@ -203,7 +229,7 @@ def unmarshal_DiscountFilter(data: Any) -> DiscountFilter: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -261,18 +287,26 @@ def unmarshal_Discount(data: Any) -> Discount: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("start_date", None) if field is not None: args["start_date"] = 
parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None field = data.get("stop_date", None) if field is not None: args["stop_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stop_date"] = None field = data.get("coupon", None) if field is not None: args["coupon"] = unmarshal_DiscountCoupon(field) + else: + args["coupon"] = None return Discount(**args) @@ -338,10 +372,14 @@ def unmarshal_ListTaxesResponseTax(data: Any) -> ListTaxesResponseTax: field = data.get("rate", None) if field is not None: args["rate"] = field + else: + args["rate"] = None field = data.get("total_tax_value", None) if field is not None: args["total_tax_value"] = field + else: + args["total_tax_value"] = None return ListTaxesResponseTax(**args) @@ -369,5 +407,7 @@ def unmarshal_ListTaxesResponse(data: Any) -> ListTaxesResponse: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return ListTaxesResponse(**args) diff --git a/scaleway/scaleway/block/v1alpha1/marshalling.py b/scaleway/scaleway/block/v1alpha1/marshalling.py index 4381f8215..b7f7e70b6 100644 --- a/scaleway/scaleway/block/v1alpha1/marshalling.py +++ b/scaleway/scaleway/block/v1alpha1/marshalling.py @@ -51,7 +51,7 @@ def unmarshal_Reference(data: Any) -> Reference: if field is not None: args["product_resource_id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -62,6 +62,8 @@ def unmarshal_Reference(data: Any) -> Reference: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Reference(**args) @@ -82,7 +84,7 @@ def unmarshal_SnapshotParentVolume(data: Any) -> SnapshotParentVolume: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -135,21 +137,27 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: if field is not None: args["zone"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field field = data.get("parent_volume", None) if field is not None: args["parent_volume"] = unmarshal_SnapshotParentVolume(field) + else: + args["parent_volume"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Snapshot(**args) @@ -162,13 +170,15 @@ def unmarshal_VolumeSpecifications(data: Any) -> VolumeSpecifications: args: Dict[str, Any] = {} - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field field = data.get("perf_iops", None) if field is not None: args["perf_iops"] = field + else: + args["perf_iops"] = None return VolumeSpecifications(**args) @@ -189,7 +199,7 @@ def unmarshal_Volume(data: Any) -> Volume: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -210,14 +220,20 @@ def unmarshal_Volume(data: Any) -> Volume: field = 
data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("parent_snapshot_id", None) if field is not None: args["parent_snapshot_id"] = field + else: + args["parent_snapshot_id"] = None field = data.get("status", None) if field is not None: @@ -234,12 +250,16 @@ def unmarshal_Volume(data: Any) -> Volume: field = data.get("specs", None) if field is not None: args["specs"] = unmarshal_VolumeSpecifications(field) + else: + args["specs"] = None field = data.get("last_detached_at", None) if field is not None: args["last_detached_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_detached_at"] = None return Volume(**args) @@ -273,21 +293,27 @@ def unmarshal_VolumeType(data: Any) -> VolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("pricing", None) if field is not None: args["pricing"] = unmarshal_Money(field) + else: + args["pricing"] = None field = data.get("snapshot_pricing", None) if field is not None: args["snapshot_pricing"] = unmarshal_Money(field) + else: + args["snapshot_pricing"] = None field = data.get("specs", None) if field is not None: args["specs"] = unmarshal_VolumeSpecifications(field) + else: + args["specs"] = None return VolumeType(**args) diff --git a/scaleway/scaleway/cockpit/v1/marshalling.py b/scaleway/scaleway/cockpit/v1/marshalling.py index 12bc2dd4b..ac633628f 100644 --- a/scaleway/scaleway/cockpit/v1/marshalling.py +++ b/scaleway/scaleway/cockpit/v1/marshalling.py @@ -74,6 +74,8 @@ def unmarshal_ContactPoint(data: Any) -> ContactPoint: field = data.get("email", None) if field is not None: args["email"] = unmarshal_ContactPointEmail(field) + else: + args["email"] = None return ContactPoint(**args) @@ -102,7 +104,7 @@ def unmarshal_DataSource(data: Any) -> DataSource: if field is not None: args["url"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -121,10 +123,14 @@ def unmarshal_DataSource(data: Any) -> DataSource: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return DataSource(**args) @@ -183,6 +189,8 @@ def unmarshal_GrafanaUser(data: Any) -> GrafanaUser: field = data.get("password", None) if field is not None: args["password"] = field + else: + args["password"] = None return GrafanaUser(**args) @@ -218,14 +226,20 @@ def unmarshal_Plan(data: Any) -> Plan: field = data.get("retention_metrics_interval", None) if field is not None: args["retention_metrics_interval"] = field + else: + args["retention_metrics_interval"] = None field = data.get("retention_logs_interval", None) if field is not None: args["retention_logs_interval"] = field + else: + args["retention_logs_interval"] = None field = data.get("retention_traces_interval", None) if field is not None: args["retention_traces_interval"] = field + else: + args["retention_traces_interval"] = None 
return Plan(**args) @@ -261,14 +275,20 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("secret_key", None) if field is not None: args["secret_key"] = field + else: + args["secret_key"] = None return Token(**args) @@ -296,6 +316,8 @@ def unmarshal_AlertManager(data: Any) -> AlertManager: field = data.get("alert_manager_url", None) if field is not None: args["alert_manager_url"] = field + else: + args["alert_manager_url"] = None return AlertManager(**args) @@ -488,10 +510,14 @@ def unmarshal_Usage(data: Any) -> Usage: field = data.get("data_source_id", None) if field is not None: args["data_source_id"] = field + else: + args["data_source_id"] = None field = data.get("interval", None) if field is not None: args["interval"] = field + else: + args["interval"] = None return Usage(**args) @@ -507,22 +533,32 @@ def unmarshal_UsageOverview(data: Any) -> UsageOverview: field = data.get("scaleway_metrics_usage", None) if field is not None: args["scaleway_metrics_usage"] = unmarshal_Usage(field) + else: + args["scaleway_metrics_usage"] = None field = data.get("scaleway_logs_usage", None) if field is not None: args["scaleway_logs_usage"] = unmarshal_Usage(field) + else: + args["scaleway_logs_usage"] = None field = data.get("external_metrics_usage", None) if field is not None: args["external_metrics_usage"] = unmarshal_Usage(field) + else: + args["external_metrics_usage"] = None field = data.get("external_logs_usage", None) if field is not None: args["external_logs_usage"] = unmarshal_Usage(field) + else: + args["external_logs_usage"] = None field = data.get("external_traces_usage", None) if field is not None: args["external_traces_usage"] = unmarshal_Usage(field) + else: + args["external_traces_usage"] = None return UsageOverview(**args) diff --git a/scaleway/scaleway/cockpit/v1beta1/marshalling.py b/scaleway/scaleway/cockpit/v1beta1/marshalling.py index edf291af9..47df80c4d 100644 --- a/scaleway/scaleway/cockpit/v1beta1/marshalling.py +++ b/scaleway/scaleway/cockpit/v1beta1/marshalling.py @@ -73,6 +73,8 @@ def unmarshal_ContactPoint(data: Any) -> ContactPoint: field = data.get("email", None) if field is not None: args["email"] = unmarshal_ContactPointEmail(field) + else: + args["email"] = None return ContactPoint(**args) @@ -101,7 +103,7 @@ def unmarshal_Datasource(data: Any) -> Datasource: if field is not None: args["url"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -166,6 +168,8 @@ def unmarshal_GrafanaUser(data: Any) -> GrafanaUser: field = data.get("password", None) if field is not None: args["password"] = field + else: + args["password"] = None return GrafanaUser(**args) @@ -240,18 +244,26 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("scopes", None) if field is not None: args["scopes"] = 
unmarshal_TokenScopes(field) + else: + args["scopes"] = None field = data.get("secret_key", None) if field is not None: args["secret_key"] = field + else: + args["secret_key"] = None return Token(**args) @@ -322,14 +334,20 @@ def unmarshal_Plan(data: Any) -> Plan: field = data.get("retention_metrics_interval", None) if field is not None: args["retention_metrics_interval"] = field + else: + args["retention_metrics_interval"] = None field = data.get("retention_logs_interval", None) if field is not None: args["retention_logs_interval"] = field + else: + args["retention_logs_interval"] = None field = data.get("retention_traces_interval", None) if field is not None: args["retention_traces_interval"] = field + else: + args["retention_traces_interval"] = None return Plan(**args) @@ -357,18 +375,26 @@ def unmarshal_Cockpit(data: Any) -> Cockpit: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("endpoints", None) if field is not None: args["endpoints"] = unmarshal_CockpitEndpoints(field) + else: + args["endpoints"] = None field = data.get("plan", None) if field is not None: args["plan"] = unmarshal_Plan(field) + else: + args["plan"] = None return Cockpit(**args) diff --git a/scaleway/scaleway/container/v1beta1/marshalling.py b/scaleway/scaleway/container/v1beta1/marshalling.py index 224844ea6..98c072c3e 100644 --- a/scaleway/scaleway/container/v1beta1/marshalling.py +++ b/scaleway/scaleway/container/v1beta1/marshalling.py @@ -122,14 +122,20 @@ def unmarshal_Container(data: Any) -> Container: field = data.get("timeout", None) if field is not None: args["timeout"] = field + else: + args["timeout"] = None field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("domain_name", None) if field is not None: @@ -193,6 +199,8 @@ def unmarshal_Cron(data: Any) -> Cron: field = data.get("args", None) if field is not None: args["args"] = field + else: + args["args"] = None return Cron(**args) @@ -228,6 +236,8 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None return Domain(**args) @@ -287,10 +297,14 @@ def unmarshal_Namespace(data: Any) -> Namespace: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return Namespace(**args) @@ -318,22 +332,32 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("container_id", None) if field is not None: args["container_id"] = field + else: + args["container_id"] = None field = data.get("namespace_id", None) if field is not None: args["namespace_id"] = field + else: + args["namespace_id"] = None field = data.get("public_key", None) if field is not None: args["public_key"] = field + else: + args["public_key"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + 
args["description"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return Token(**args) @@ -365,6 +389,8 @@ def unmarshal_TriggerMnqNatsClientConfig(data: Any) -> TriggerMnqNatsClientConfi field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + else: + args["mnq_credential_id"] = None return TriggerMnqNatsClientConfig(**args) @@ -392,6 +418,8 @@ def unmarshal_TriggerMnqSqsClientConfig(data: Any) -> TriggerMnqSqsClientConfig: field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + else: + args["mnq_credential_id"] = None return TriggerMnqSqsClientConfig(**args) @@ -458,18 +486,26 @@ def unmarshal_Trigger(data: Any) -> Trigger: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("scw_sqs_config", None) if field is not None: args["scw_sqs_config"] = unmarshal_TriggerMnqSqsClientConfig(field) + else: + args["scw_sqs_config"] = None field = data.get("scw_nats_config", None) if field is not None: args["scw_nats_config"] = unmarshal_TriggerMnqNatsClientConfig(field) + else: + args["scw_nats_config"] = None field = data.get("sqs_config", None) if field is not None: args["sqs_config"] = unmarshal_TriggerSqsClientConfig(field) + else: + args["sqs_config"] = None return Trigger(**args) diff --git a/scaleway/scaleway/dedibox/v1/marshalling.py b/scaleway/scaleway/dedibox/v1/marshalling.py index 4452a9a73..8bdd9b9b6 100644 --- a/scaleway/scaleway/dedibox/v1/marshalling.py +++ b/scaleway/scaleway/dedibox/v1/marshalling.py @@ -222,7 +222,7 @@ def unmarshal_Disk(data: Any) -> Disk: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -241,7 +241,7 @@ def unmarshal_Memory(data: Any) -> Memory: if field is not None: args["capacity"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -306,7 +306,7 @@ def unmarshal_OfferAntiDosInfo(data: Any) -> OfferAntiDosInfo: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -354,6 +354,8 @@ def unmarshal_OfferFailoverBlockInfo(data: Any) -> OfferFailoverBlockInfo: field = data.get("onetime_fees", None) if field is not None: args["onetime_fees"] = unmarshal_Offer(field) + else: + args["onetime_fees"] = None return OfferFailoverBlockInfo(**args) @@ -369,6 +371,8 @@ def unmarshal_OfferFailoverIpInfo(data: Any) -> OfferFailoverIpInfo: field = data.get("onetime_fees", None) if field is not None: args["onetime_fees"] = unmarshal_Offer(field) + else: + args["onetime_fees"] = None return OfferFailoverIpInfo(**args) @@ -497,10 +501,14 @@ def unmarshal_OfferServerInfo(data: Any) -> OfferServerInfo: field = data.get("rpn_version", None) if field is not None: args["rpn_version"] = field + else: + args["rpn_version"] = None field = data.get("onetime_fees", None) if field is not None: args["onetime_fees"] = unmarshal_Offer(field) + else: + args["onetime_fees"] = None return OfferServerInfo(**args) @@ -608,54 +616,80 @@ def unmarshal_Offer(data: Any) -> Offer: field = data.get("pricing", None) if field is not None: args["pricing"] = unmarshal_Money(field) + else: + args["pricing"] = None field = 
data.get("server_info", None) if field is not None: args["server_info"] = unmarshal_OfferServerInfo(field) + else: + args["server_info"] = None field = data.get("service_level_info", None) if field is not None: args["service_level_info"] = unmarshal_OfferServiceLevelInfo(field) + else: + args["service_level_info"] = None field = data.get("rpn_info", None) if field is not None: args["rpn_info"] = unmarshal_OfferRPNInfo(field) + else: + args["rpn_info"] = None field = data.get("san_info", None) if field is not None: args["san_info"] = unmarshal_OfferSANInfo(field) + else: + args["san_info"] = None field = data.get("antidos_info", None) if field is not None: args["antidos_info"] = unmarshal_OfferAntiDosInfo(field) + else: + args["antidos_info"] = None field = data.get("backup_info", None) if field is not None: args["backup_info"] = unmarshal_OfferBackupInfo(field) + else: + args["backup_info"] = None field = data.get("usb_storage_info", None) if field is not None: args["usb_storage_info"] = unmarshal_OfferStorageInfo(field) + else: + args["usb_storage_info"] = None field = data.get("storage_info", None) if field is not None: args["storage_info"] = unmarshal_OfferStorageInfo(field) + else: + args["storage_info"] = None field = data.get("license_info", None) if field is not None: args["license_info"] = unmarshal_OfferLicenseInfo(field) + else: + args["license_info"] = None field = data.get("failover_ip_info", None) if field is not None: args["failover_ip_info"] = unmarshal_OfferFailoverIpInfo(field) + else: + args["failover_ip_info"] = None field = data.get("failover_block_info", None) if field is not None: args["failover_block_info"] = unmarshal_OfferFailoverBlockInfo(field) + else: + args["failover_block_info"] = None field = data.get("bandwidth_info", None) if field is not None: args["bandwidth_info"] = unmarshal_OfferBandwidthInfo(field) + else: + args["bandwidth_info"] = None return Offer(**args) @@ -676,7 +710,7 @@ def unmarshal_OS(data: Any) -> OS: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -739,24 +773,34 @@ def unmarshal_OS(data: Any) -> OS: field = data.get("max_partitions", None) if field is not None: args["max_partitions"] = field + else: + args["max_partitions"] = None field = data.get("panel_password_regex", None) if field is not None: args["panel_password_regex"] = field + else: + args["panel_password_regex"] = None field = data.get("requires_valid_hostname", None) if field is not None: args["requires_valid_hostname"] = field + else: + args["requires_valid_hostname"] = None field = data.get("hostname_regex", None) if field is not None: args["hostname_regex"] = field + else: + args["hostname_regex"] = None field = data.get("released_at", None) if field is not None: args["released_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["released_at"] = None return OS(**args) @@ -800,6 +844,8 @@ def unmarshal_RpnSan(data: Any) -> RpnSan: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("offer_name", None) if field is not None: @@ -828,22 +874,30 @@ def unmarshal_RpnSan(data: Any) -> RpnSan: field = data.get("offer", None) if field is not None: args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("delivered_at", None) if field is not None: args["delivered_at"] = ( 
parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["delivered_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return RpnSan(**args) @@ -864,7 +918,7 @@ def unmarshal_RpnGroup(data: Any) -> RpnGroup: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -891,6 +945,8 @@ def unmarshal_RpnGroup(data: Any) -> RpnGroup: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return RpnGroup(**args) @@ -915,7 +971,7 @@ def unmarshal_NetworkInterface(data: Any) -> NetworkInterface: if field is not None: args["mac"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -966,18 +1022,26 @@ def unmarshal_ServerOption(data: Any) -> ServerOption: field = data.get("offer", None) if field is not None: args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None return ServerOption(**args) @@ -1030,6 +1094,8 @@ def unmarshal_Server(data: Any) -> Server: args["rebooted_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["rebooted_at"] = None field = data.get("status", None) if field is not None: @@ -1084,34 +1150,50 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("offer", None) if field is not None: args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("location", None) if field is not None: args["location"] = unmarshal_ServerLocation(field) + else: + args["location"] = None field = data.get("os", None) if field is not None: args["os"] = unmarshal_OS(field) + else: + args["os"] = None field = data.get("level", None) if field is not None: args["level"] = unmarshal_ServiceLevel(field) + else: + args["level"] = None field = data.get("rescue_os", None) if field is not None: args["rescue_os"] = unmarshal_OS(field) + else: + args["rescue_os"] = None return Server(**args) @@ -1163,7 +1245,7 @@ def unmarshal_RpnV2Group(data: Any) -> RpnV2Group: if field 
is not None: args["project_id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1186,10 +1268,14 @@ def unmarshal_RpnV2Group(data: Any) -> RpnV2Group: field = data.get("subnet", None) if field is not None: args["subnet"] = unmarshal_RpnV2GroupSubnet(field) + else: + args["subnet"] = None field = data.get("rpnv1_group", None) if field is not None: args["rpnv1_group"] = unmarshal_RpnGroup(field) + else: + args["rpnv1_group"] = None return RpnV2Group(**args) @@ -1210,37 +1296,49 @@ def unmarshal_Service(data: Any) -> Service: if field is not None: args["provisioning_status"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("resource_id", None) if field is not None: args["resource_id"] = field + else: + args["resource_id"] = None field = data.get("offer", None) if field is not None: args["offer"] = unmarshal_Offer(field) + else: + args["offer"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("delivered_at", None) if field is not None: args["delivered_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["delivered_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return Service(**args) @@ -1324,25 +1422,33 @@ def unmarshal_FailoverIP(data: Any) -> FailoverIP: if field is not None: args["status"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("mac", None) if field is not None: args["mac"] = field + else: + args["mac"] = None field = data.get("server_id", None) if field is not None: args["server_id"] = field + else: + args["server_id"] = None field = data.get("block", None) if field is not None: args["block"] = unmarshal_FailoverBlock(field) + else: + args["block"] = None field = data.get("server_zone", None) if field is not None: args["server_zone"] = field + else: + args["server_zone"] = None return FailoverIP(**args) @@ -1374,6 +1480,8 @@ def unmarshal_BMCAccess(data: Any) -> BMCAccess: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return BMCAccess(**args) @@ -1468,6 +1576,8 @@ def unmarshal_CanOrderResponse(data: Any) -> CanOrderResponse: field = data.get("message", None) if field is not None: args["message"] = field + else: + args["message"] = None return CanOrderResponse(**args) @@ -1579,6 +1689,8 @@ def unmarshal_GetRpnStatusResponse(data: Any) -> GetRpnStatusResponse: field = data.get("operations_left", None) if field is not None: args["operations_left"] = field + else: + args["operations_left"] = None return GetRpnStatusResponse(**args) @@ -1655,18 +1767,26 @@ def unmarshal_Invoice(data: Any) -> Invoice: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field 
is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("paid_at", None) if field is not None: args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["paid_at"] = None return Invoice(**args) @@ -1765,18 +1885,26 @@ def unmarshal_InvoiceSummary(data: Any) -> InvoiceSummary: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("paid_at", None) if field is not None: args["paid_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["paid_at"] = None return InvoiceSummary(**args) @@ -1852,21 +1980,27 @@ def unmarshal_RpnSanIp(data: Any) -> RpnSanIp: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("server", None) if field is not None: args["server"] = unmarshal_RpnSanIpServer(field) + else: + args["server"] = None field = data.get("rpnv2_group", None) if field is not None: args["rpnv2_group"] = unmarshal_RpnSanIpRpnV2Group(field) + else: + args["rpnv2_group"] = None field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_IP(field) + else: + args["ip"] = None return RpnSanIp(**args) @@ -1955,20 +2089,28 @@ def unmarshal_RefundSummary(data: Any) -> RefundSummary: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("refunded_at", None) if field is not None: args["refunded_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["refunded_at"] = None return RefundSummary(**args) @@ -2106,14 +2248,20 @@ def unmarshal_RpnGroupMember(data: Any) -> RpnGroupMember: field = data.get("san_server", None) if field is not None: args["san_server"] = unmarshal_RpnSanServer(field) + else: + args["san_server"] = None field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None field = data.get("speed", None) if field is not None: args["speed"] = field + else: + args["speed"] = None return RpnGroupMember(**args) @@ -2220,6 +2368,8 @@ def unmarshal_RpnSanSummary(data: Any) -> RpnSanSummary: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("offer_name", None) if field is not None: @@ -2246,16 +2396,22 @@ def unmarshal_RpnSanSummary(data: Any) 
-> RpnSanSummary: args["delivered_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["delivered_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return RpnSanSummary(**args) @@ -2328,10 +2484,14 @@ def unmarshal_RpnServerCapability(data: Any) -> RpnServerCapability: field = data.get("ip_address", None) if field is not None: args["ip_address"] = field + else: + args["ip_address"] = None field = data.get("rpn_version", None) if field is not None: args["rpn_version"] = field + else: + args["rpn_version"] = None return RpnServerCapability(**args) @@ -2407,14 +2567,20 @@ def unmarshal_RpnV2Member(data: Any) -> RpnV2Member: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None field = data.get("rpnv1_group", None) if field is not None: args["rpnv1_group"] = unmarshal_RpnGroup(field) + else: + args["rpnv1_group"] = None field = data.get("speed", None) if field is not None: args["speed"] = field + else: + args["speed"] = None return RpnV2Member(**args) @@ -2442,20 +2608,28 @@ def unmarshal_Log(data: Any) -> Log: field = data.get("group", None) if field is not None: args["group"] = unmarshal_RpnV2Group(field) + else: + args["group"] = None field = data.get("member", None) if field is not None: args["member"] = unmarshal_RpnV2Member(field) + else: + args["member"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("finished_at", None) if field is not None: args["finished_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["finished_at"] = None return Log(**args) @@ -2537,7 +2711,7 @@ def unmarshal_ServerDisk(data: Any) -> ServerDisk: if field is not None: args["connector"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -2592,6 +2766,8 @@ def unmarshal_ServerEvent(data: Any) -> ServerEvent: field = data.get("date", None) if field is not None: args["date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["date"] = None return ServerEvent(**args) @@ -2648,14 +2824,20 @@ def unmarshal_ServerSummary(data: Any) -> ServerSummary: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("offer_id", None) if field is not None: @@ -2692,14 +2874,20 @@ def unmarshal_ServerSummary(data: Any) -> ServerSummary: field = data.get("os_id", None) if field is not None: args["os_id"] = field + else: + args["os_id"] = None field = data.get("level", None) if field is not None: args["level"] = unmarshal_ServiceLevel(field) + else: + 
args["level"] = None field = data.get("rpn_version", None) if field is not None: args["rpn_version"] = field + else: + args["rpn_version"] = None return ServerSummary(**args) @@ -2834,20 +3022,28 @@ def unmarshal_Refund(data: Any) -> Refund: field = data.get("total_with_taxes", None) if field is not None: args["total_with_taxes"] = unmarshal_Money(field) + else: + args["total_with_taxes"] = None field = data.get("total_without_taxes", None) if field is not None: args["total_without_taxes"] = unmarshal_Money(field) + else: + args["total_without_taxes"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("refunded_at", None) if field is not None: args["refunded_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["refunded_at"] = None return Refund(**args) @@ -2887,7 +3083,7 @@ def unmarshal_Partition(data: Any) -> Partition: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -2910,6 +3106,8 @@ def unmarshal_Partition(data: Any) -> Partition: field = data.get("mount_point", None) if field is not None: args["mount_point"] = field + else: + args["mount_point"] = None return Partition(**args) @@ -2964,10 +3162,14 @@ def unmarshal_ServerInstall(data: Any) -> ServerInstall: field = data.get("user_login", None) if field is not None: args["user_login"] = field + else: + args["user_login"] = None field = data.get("panel_url", None) if field is not None: args["panel_url"] = field + else: + args["panel_url"] = None return ServerInstall(**args) diff --git a/scaleway/scaleway/document_db/v1beta1/marshalling.py b/scaleway/scaleway/document_db/v1beta1/marshalling.py index 8efeb6af8..b4b876ca3 100644 --- a/scaleway/scaleway/document_db/v1beta1/marshalling.py +++ b/scaleway/scaleway/document_db/v1beta1/marshalling.py @@ -155,26 +155,38 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("ip", None) if field is not None: args["ip"] = field + else: + args["ip"] = None field = data.get("name", None) if field is not None: args["name"] = field + else: + args["name"] = None field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None field = data.get("load_balancer", None) if field is not None: args["load_balancer"] = unmarshal_EndpointLoadBalancerDetails(field) + else: + args["load_balancer"] = None field = data.get("direct_access", None) if field is not None: args["direct_access"] = unmarshal_EndpointDirectAccessDetails(field) + else: + args["direct_access"] = None field = data.get("hostname", None) if field is not None: args["hostname"] = field + else: + args["hostname"] = None return Endpoint(**args) @@ -198,18 +210,26 @@ def unmarshal_Maintenance(data: Any) -> Maintenance: field = data.get("starts_at", None) if field is not None: args["starts_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["starts_at"] = None field = data.get("stops_at", None) if field is not None: args["stops_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stops_at"] = None field = data.get("closed_at", None) if field is not None: args["closed_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["closed_at"] = None field = data.get("forced_at", None) 
if field is not None: args["forced_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["forced_at"] = None return Maintenance(**args) @@ -301,14 +321,20 @@ def unmarshal_InstanceLog(data: Any) -> InstanceLog: field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return InstanceLog(**args) @@ -338,6 +364,8 @@ def unmarshal_BackupSchedule(data: Any) -> BackupSchedule: args["next_run_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["next_run_at"] = None return BackupSchedule(**args) @@ -372,10 +400,14 @@ def unmarshal_LogsPolicy(data: Any) -> LogsPolicy: field = data.get("max_age_retention", None) if field is not None: args["max_age_retention"] = field + else: + args["max_age_retention"] = None field = data.get("total_disk_retention", None) if field is not None: args["total_disk_retention"] = field + else: + args["total_disk_retention"] = None return LogsPolicy(**args) @@ -415,7 +447,7 @@ def unmarshal_Volume(data: Any) -> Volume: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -423,7 +455,7 @@ def unmarshal_Volume(data: Any) -> Volume: if field is not None: args["size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -457,10 +489,14 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None field = data.get("project_id", None) if field is not None: @@ -499,10 +535,14 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("endpoint", None) if field is not None: args["endpoint"] = unmarshal_Endpoint(field) + else: + args["endpoint"] = None field = data.get("backup_schedule", None) if field is not None: args["backup_schedule"] = unmarshal_BackupSchedule(field) + else: + args["backup_schedule"] = None field = data.get("read_replicas", None) if field is not None: @@ -539,6 +579,8 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("logs_policy", None) if field is not None: args["logs_policy"] = unmarshal_LogsPolicy(field) + else: + args["logs_policy"] = None return Instance(**args) @@ -574,11 +616,11 @@ def unmarshal_SnapshotVolumeType(data: Any) -> SnapshotVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -624,22 +666,32 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + 
args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("volume_type", None) if field is not None: args["volume_type"] = unmarshal_SnapshotVolumeType(field) + else: + args["volume_type"] = None return Snapshot(**args) @@ -694,6 +746,8 @@ def unmarshal_ACLRule(data: Any) -> ACLRule: field = data.get("port", None) if field is not None: args["port"] = field + else: + args["port"] = None return ACLRule(**args) @@ -818,26 +872,38 @@ def unmarshal_EngineSetting(data: Any) -> EngineSetting: field = data.get("unit", None) if field is not None: args["unit"] = field + else: + args["unit"] = None field = data.get("string_constraint", None) if field is not None: args["string_constraint"] = field + else: + args["string_constraint"] = None field = data.get("int_min", None) if field is not None: args["int_min"] = field + else: + args["int_min"] = None field = data.get("int_max", None) if field is not None: args["int_max"] = field + else: + args["int_max"] = None field = data.get("float_min", None) if field is not None: args["float_min"] = field + else: + args["float_min"] = None field = data.get("float_max", None) if field is not None: args["float_max"] = field + else: + args["float_max"] = None return EngineSetting(**args) @@ -883,6 +949,8 @@ def unmarshal_EngineVersion(data: Any) -> EngineVersion: args["end_of_life"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["end_of_life"] = None return EngineVersion(**args) @@ -1089,7 +1157,7 @@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1109,7 +1177,7 @@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: if field is not None: args["chunk_size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -1155,10 +1223,14 @@ def unmarshal_NodeType(data: Any) -> NodeType: field = data.get("volume_constraint", None) if field is not None: args["volume_constraint"] = unmarshal_NodeTypeVolumeConstraintSizes(field) + else: + args["volume_constraint"] = None field = data.get("is_bssd_compatible", None) if field is not None: args["is_bssd_compatible"] = field + else: + args["is_bssd_compatible"] = None field = data.get("available_volume_types", None) if field is not None: diff --git a/scaleway/scaleway/domain/v2beta1/marshalling.py b/scaleway/scaleway/domain/v2beta1/marshalling.py index c198aad52..fdeab0c31 100644 --- a/scaleway/scaleway/domain/v2beta1/marshalling.py +++ b/scaleway/scaleway/domain/v2beta1/marshalling.py @@ -137,6 +137,8 @@ def unmarshal_ContactExtensionFRAssociationInfo( args["publication_jo"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["publication_jo"] = None return ContactExtensionFRAssociationInfo(**args) @@ -241,24 +243,34 @@ def unmarshal_ContactExtensionFR(data: Any) -> ContactExtensionFR: field = data.get("individual_info", None) if field is not None: args["individual_info"] = unmarshal_ContactExtensionFRIndividualInfo(field) + else: + args["individual_info"] = None field = data.get("duns_info", None) 
if field is not None: args["duns_info"] = unmarshal_ContactExtensionFRDunsInfo(field) + else: + args["duns_info"] = None field = data.get("association_info", None) if field is not None: args["association_info"] = unmarshal_ContactExtensionFRAssociationInfo(field) + else: + args["association_info"] = None field = data.get("trademark_info", None) if field is not None: args["trademark_info"] = unmarshal_ContactExtensionFRTrademarkInfo(field) + else: + args["trademark_info"] = None field = data.get("code_auth_afnic_info", None) if field is not None: args["code_auth_afnic_info"] = unmarshal_ContactExtensionFRCodeAuthAfnicInfo( field ) + else: + args["code_auth_afnic_info"] = None return ContactExtensionFR(**args) @@ -390,14 +402,20 @@ def unmarshal_Contact(data: Any) -> Contact: args["questions"] = ( [unmarshal_ContactQuestion(v) for v in field] if field is not None else None ) + else: + args["questions"] = None field = data.get("extension_fr", None) if field is not None: args["extension_fr"] = unmarshal_ContactExtensionFR(field) + else: + args["extension_fr"] = None field = data.get("extension_eu", None) if field is not None: args["extension_eu"] = unmarshal_ContactExtensionEU(field) + else: + args["extension_eu"] = None field = data.get("email_status", None) if field is not None: @@ -414,6 +432,8 @@ def unmarshal_Contact(data: Any) -> Contact: field = data.get("extension_nl", None) if field is not None: args["extension_nl"] = unmarshal_ContactExtensionNL(field) + else: + args["extension_nl"] = None return Contact(**args) @@ -463,10 +483,14 @@ def unmarshal_DNSZone(data: Any) -> DNSZone: field = data.get("message", None) if field is not None: args["message"] = field + else: + args["message"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return DNSZone(**args) @@ -529,10 +553,14 @@ def unmarshal_SSLCertificate(data: Any) -> SSLCertificate: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None return SSLCertificate(**args) @@ -554,6 +582,8 @@ def unmarshal_CheckContactsCompatibilityResponseContactCheckResult( field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None return CheckContactsCompatibilityResponseContactCheckResult(**args) @@ -577,18 +607,24 @@ def unmarshal_CheckContactsCompatibilityResponse( args["owner_check_result"] = ( unmarshal_CheckContactsCompatibilityResponseContactCheckResult(field) ) + else: + args["owner_check_result"] = None field = data.get("administrative_check_result", None) if field is not None: args["administrative_check_result"] = ( unmarshal_CheckContactsCompatibilityResponseContactCheckResult(field) ) + else: + args["administrative_check_result"] = None field = data.get("technical_check_result", None) if field is not None: args["technical_check_result"] = ( unmarshal_CheckContactsCompatibilityResponseContactCheckResult(field) ) + else: + args["technical_check_result"] = None return CheckContactsCompatibilityResponse(**args) @@ -660,7 +696,7 @@ def unmarshal_DSRecordDigest(data: Any) -> DSRecordDigest: args: Dict[str, Any] = {} - field = data.get("type_", None) + 
field = data.get("type", None) if field is not None: args["type_"] = field @@ -671,6 +707,8 @@ def unmarshal_DSRecordDigest(data: Any) -> DSRecordDigest: field = data.get("public_key", None) if field is not None: args["public_key"] = unmarshal_DSRecordPublicKey(field) + else: + args["public_key"] = None return DSRecordDigest(**args) @@ -694,10 +732,14 @@ def unmarshal_DSRecord(data: Any) -> DSRecord: field = data.get("digest", None) if field is not None: args["digest"] = unmarshal_DSRecordDigest(field) + else: + args["digest"] = None field = data.get("public_key", None) if field is not None: args["public_key"] = unmarshal_DSRecordPublicKey(field) + else: + args["public_key"] = None return DSRecord(**args) @@ -721,6 +763,8 @@ def unmarshal_TldOffer(data: Any) -> TldOffer: field = data.get("price", None) if field is not None: args["price"] = unmarshal_Money(field) + else: + args["price"] = None return TldOffer(**args) @@ -870,14 +914,20 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("dnssec", None) if field is not None: args["dnssec"] = unmarshal_DomainDNSSEC(field) + else: + args["dnssec"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("status", None) if field is not None: @@ -902,30 +952,42 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("owner_contact", None) if field is not None: args["owner_contact"] = unmarshal_Contact(field) + else: + args["owner_contact"] = None field = data.get("technical_contact", None) if field is not None: args["technical_contact"] = unmarshal_Contact(field) + else: + args["technical_contact"] = None field = data.get("administrative_contact", None) if field is not None: args["administrative_contact"] = unmarshal_Contact(field) + else: + args["administrative_contact"] = None field = data.get("external_domain_registration_status", None) if field is not None: args["external_domain_registration_status"] = ( unmarshal_DomainRegistrationStatusExternalDomain(field) ) + else: + args["external_domain_registration_status"] = None field = data.get("transfer_registration_status", None) if field is not None: args["transfer_registration_status"] = ( unmarshal_DomainRegistrationStatusTransfer(field) ) + else: + args["transfer_registration_status"] = None field = data.get("tld", None) if field is not None: args["tld"] = unmarshal_Tld(field) + else: + args["tld"] = None return Domain(**args) @@ -1062,10 +1124,14 @@ def unmarshal_RecordHTTPServiceConfig(data: Any) -> RecordHTTPServiceConfig: field = data.get("must_contain", None) if field is not None: args["must_contain"] = field + else: + args["must_contain"] = None field = data.get("user_agent", None) if field is not None: args["user_agent"] = field + else: + args["user_agent"] = None return RecordHTTPServiceConfig(**args) @@ -1132,7 +1198,7 @@ def unmarshal_Record(data: Any) -> Record: if field is not None: args["ttl"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1143,22 +1209,32 @@ def unmarshal_Record(data: Any) -> Record: field = data.get("comment", None) if field is not None: args["comment"] = field + else: + args["comment"] = None field = data.get("geo_ip_config", None) if field is not None: 
args["geo_ip_config"] = unmarshal_RecordGeoIPConfig(field) + else: + args["geo_ip_config"] = None field = data.get("http_service_config", None) if field is not None: args["http_service_config"] = unmarshal_RecordHTTPServiceConfig(field) + else: + args["http_service_config"] = None field = data.get("weighted_config", None) if field is not None: args["weighted_config"] = unmarshal_RecordWeightedConfig(field) + else: + args["weighted_config"] = None field = data.get("view_config", None) if field is not None: args["view_config"] = unmarshal_RecordViewConfig(field) + else: + args["view_config"] = None return Record(**args) @@ -1175,17 +1251,21 @@ def unmarshal_RecordIdentifier(data: Any) -> RecordIdentifier: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("data", None) if field is not None: args["data"] = field + else: + args["data"] = None field = data.get("ttl", None) if field is not None: args["ttl"] = field + else: + args["ttl"] = None return RecordIdentifier(**args) @@ -1229,10 +1309,14 @@ def unmarshal_RecordChangeDelete(data: Any) -> RecordChangeDelete: field = data.get("id", None) if field is not None: args["id"] = field + else: + args["id"] = None field = data.get("id_fields", None) if field is not None: args["id_fields"] = unmarshal_RecordIdentifier(field) + else: + args["id_fields"] = None return RecordChangeDelete(**args) @@ -1254,10 +1338,14 @@ def unmarshal_RecordChangeSet(data: Any) -> RecordChangeSet: field = data.get("id", None) if field is not None: args["id"] = field + else: + args["id"] = None field = data.get("id_fields", None) if field is not None: args["id_fields"] = unmarshal_RecordIdentifier(field) + else: + args["id_fields"] = None return RecordChangeSet(**args) @@ -1273,18 +1361,26 @@ def unmarshal_RecordChange(data: Any) -> RecordChange: field = data.get("add", None) if field is not None: args["add"] = unmarshal_RecordChangeAdd(field) + else: + args["add"] = None - field = data.get("set_", None) + field = data.get("set", None) if field is not None: args["set_"] = unmarshal_RecordChangeSet(field) + else: + args["set_"] = None field = data.get("delete", None) if field is not None: args["delete"] = unmarshal_RecordChangeDelete(field) + else: + args["delete"] = None field = data.get("clear", None) if field is not None: args["clear"] = unmarshal_RecordChangeClear(field) + else: + args["clear"] = None return RecordChange(**args) @@ -1397,6 +1493,8 @@ def unmarshal_ContactRoles(data: Any) -> ContactRoles: field = data.get("contact", None) if field is not None: args["contact"] = unmarshal_Contact(field) + else: + args["contact"] = None return ContactRoles(**args) @@ -1519,6 +1617,8 @@ def unmarshal_DNSZoneVersion(data: Any) -> DNSZoneVersion: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return DNSZoneVersion(**args) @@ -1625,10 +1725,14 @@ def unmarshal_DomainSummary(data: Any) -> DomainSummary: field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("status", None) if field is not None: @@ -1647,16 +1751,22 @@ def 
unmarshal_DomainSummary(data: Any) -> DomainSummary: args["external_domain_registration_status"] = ( unmarshal_DomainRegistrationStatusExternalDomain(field) ) + else: + args["external_domain_registration_status"] = None field = data.get("transfer_registration_status", None) if field is not None: args["transfer_registration_status"] = ( unmarshal_DomainRegistrationStatusTransfer(field) ) + else: + args["transfer_registration_status"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return DomainSummary(**args) @@ -1709,32 +1819,44 @@ def unmarshal_RenewableDomain(data: Any) -> RenewableDomain: field = data.get("renewable_duration_in_years", None) if field is not None: args["renewable_duration_in_years"] = field + else: + args["renewable_duration_in_years"] = None field = data.get("expired_at", None) if field is not None: args["expired_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expired_at"] = None field = data.get("limit_renew_at", None) if field is not None: args["limit_renew_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["limit_renew_at"] = None field = data.get("limit_redemption_at", None) if field is not None: args["limit_redemption_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["limit_redemption_at"] = None field = data.get("estimated_delete_at", None) if field is not None: args["estimated_delete_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["estimated_delete_at"] = None field = data.get("tld", None) if field is not None: args["tld"] = unmarshal_Tld(field) + else: + args["tld"] = None return RenewableDomain(**args) @@ -1801,7 +1923,7 @@ def unmarshal_Task(data: Any) -> Task: if field is not None: args["organization_id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1812,22 +1934,32 @@ def unmarshal_Task(data: Any) -> Task: field = data.get("domain", None) if field is not None: args["domain"] = field + else: + args["domain"] = None field = data.get("started_at", None) if field is not None: args["started_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["started_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("message", None) if field is not None: args["message"] = field + else: + args["message"] = None field = data.get("contact_identifier", None) if field is not None: args["contact_identifier"] = field + else: + args["contact_identifier"] = None return Task(**args) @@ -1899,6 +2031,8 @@ def unmarshal_OrderResponse(data: Any) -> OrderResponse: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return OrderResponse(**args) @@ -1949,6 +2083,8 @@ def unmarshal_RegisterExternalDomainResponse( field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return RegisterExternalDomainResponse(**args) @@ -1983,6 +2119,8 @@ def unmarshal_AvailableDomain(data: Any) -> AvailableDomain: field = data.get("tld", None) 
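Timestamp fields keep the existing lenient expression, parser.isoparse(field) if isinstance(field, str) else field, now paired with the explicit None default when the key is absent. The helper below is only an illustration of that behaviour (the generated code inlines the expression):

# Illustrative helper: ISO 8601 strings are parsed with dateutil, values that
# are already datetimes pass through unchanged, and missing fields become None.
from datetime import datetime
from typing import Any, Optional

from dateutil import parser


def parse_timestamp(field: Any) -> Optional[datetime]:
    if field is None:
        return None
    return parser.isoparse(field) if isinstance(field, str) else field


ts = parse_timestamp("2024-04-02T13:51:33Z")
assert isinstance(ts, datetime) and ts.year == 2024
assert parse_timestamp(None) is None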
if field is not None: args["tld"] = unmarshal_Tld(field) + else: + args["tld"] = None return AvailableDomain(**args) diff --git a/scaleway/scaleway/flexibleip/v1alpha1/marshalling.py b/scaleway/scaleway/flexibleip/v1alpha1/marshalling.py index 5f44ed7d6..6f754d003 100644 --- a/scaleway/scaleway/flexibleip/v1alpha1/marshalling.py +++ b/scaleway/scaleway/flexibleip/v1alpha1/marshalling.py @@ -52,10 +52,14 @@ def unmarshal_MACAddress(data: Any) -> MACAddress: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return MACAddress(**args) @@ -99,6 +103,8 @@ def unmarshal_FlexibleIP(data: Any) -> FlexibleIP: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("reverse", None) if field is not None: @@ -111,14 +117,20 @@ def unmarshal_FlexibleIP(data: Any) -> FlexibleIP: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("mac_address", None) if field is not None: args["mac_address"] = unmarshal_MACAddress(field) + else: + args["mac_address"] = None field = data.get("server_id", None) if field is not None: args["server_id"] = field + else: + args["server_id"] = None return FlexibleIP(**args) diff --git a/scaleway/scaleway/function/v1beta1/marshalling.py b/scaleway/scaleway/function/v1beta1/marshalling.py index 34ff2d4d7..23c4b6ef4 100644 --- a/scaleway/scaleway/function/v1beta1/marshalling.py +++ b/scaleway/scaleway/function/v1beta1/marshalling.py @@ -79,6 +79,8 @@ def unmarshal_Cron(data: Any) -> Cron: field = data.get("args", None) if field is not None: args["args"] = field + else: + args["args"] = None return Cron(**args) @@ -114,6 +116,8 @@ def unmarshal_Domain(data: Any) -> Domain: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None return Domain(**args) @@ -220,18 +224,26 @@ def unmarshal_Function(data: Any) -> Function: field = data.get("timeout", None) if field is not None: args["timeout"] = field + else: + args["timeout"] = None field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("build_message", None) if field is not None: args["build_message"] = field + else: + args["build_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return Function(**args) @@ -291,10 +303,14 @@ def unmarshal_Namespace(data: Any) -> Namespace: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return Namespace(**args) @@ -322,22 +338,32 @@ def unmarshal_Token(data: Any) -> Token: field = data.get("function_id", None) if field is not None: args["function_id"] = field + else: + args["function_id"] = None field = data.get("namespace_id", None) if field is not None: args["namespace_id"] 
= field + else: + args["namespace_id"] = None field = data.get("public_key", None) if field is not None: args["public_key"] = field + else: + args["public_key"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return Token(**args) @@ -369,6 +395,8 @@ def unmarshal_TriggerMnqNatsClientConfig(data: Any) -> TriggerMnqNatsClientConfi field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + else: + args["mnq_credential_id"] = None return TriggerMnqNatsClientConfig(**args) @@ -396,6 +424,8 @@ def unmarshal_TriggerMnqSqsClientConfig(data: Any) -> TriggerMnqSqsClientConfig: field = data.get("mnq_credential_id", None) if field is not None: args["mnq_credential_id"] = field + else: + args["mnq_credential_id"] = None return TriggerMnqSqsClientConfig(**args) @@ -462,18 +492,26 @@ def unmarshal_Trigger(data: Any) -> Trigger: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("scw_sqs_config", None) if field is not None: args["scw_sqs_config"] = unmarshal_TriggerMnqSqsClientConfig(field) + else: + args["scw_sqs_config"] = None field = data.get("scw_nats_config", None) if field is not None: args["scw_nats_config"] = unmarshal_TriggerMnqNatsClientConfig(field) + else: + args["scw_nats_config"] = None field = data.get("sqs_config", None) if field is not None: args["sqs_config"] = unmarshal_TriggerSqsClientConfig(field) + else: + args["sqs_config"] = None return Trigger(**args) diff --git a/scaleway/scaleway/iam/v1alpha1/marshalling.py b/scaleway/scaleway/iam/v1alpha1/marshalling.py index 9895c9550..5f4410d94 100644 --- a/scaleway/scaleway/iam/v1alpha1/marshalling.py +++ b/scaleway/scaleway/iam/v1alpha1/marshalling.py @@ -87,14 +87,20 @@ def unmarshal_JWT(data: Any) -> JWT: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None return JWT(**args) @@ -130,26 +136,38 @@ def unmarshal_APIKey(data: Any) -> APIKey: field = data.get("secret_key", None) if field is not None: args["secret_key"] = field + else: + args["secret_key"] = None field = data.get("application_id", None) if field is not None: args["application_id"] = field + else: + args["application_id"] = None field = data.get("user_id", None) if field is not None: args["user_id"] = field + else: + args["user_id"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else 
field + else: + args["expires_at"] = None return APIKey(**args) @@ -193,10 +211,14 @@ def unmarshal_Application(data: Any) -> Application: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Application(**args) @@ -240,10 +262,14 @@ def unmarshal_Group(data: Any) -> Group: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Group(**args) @@ -291,6 +317,8 @@ def unmarshal_Log(data: Any) -> Log: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Log(**args) @@ -322,10 +350,14 @@ def unmarshal_Policy(data: Any) -> Policy: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("editable", None) if field is not None: @@ -350,18 +382,26 @@ def unmarshal_Policy(data: Any) -> Policy: field = data.get("user_id", None) if field is not None: args["user_id"] = field + else: + args["user_id"] = None field = data.get("group_id", None) if field is not None: args["group_id"] = field + else: + args["group_id"] = None field = data.get("application_id", None) if field is not None: args["application_id"] = field + else: + args["application_id"] = None field = data.get("no_principal", None) if field is not None: args["no_principal"] = field + else: + args["no_principal"] = None return Policy(**args) @@ -393,10 +433,14 @@ def unmarshal_Quotum(data: Any) -> Quotum: field = data.get("limit", None) if field is not None: args["limit"] = field + else: + args["limit"] = None field = data.get("unlimited", None) if field is not None: args["unlimited"] = field + else: + args["unlimited"] = None return Quotum(**args) @@ -440,10 +484,14 @@ def unmarshal_SSHKey(data: Any) -> SSHKey: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return SSHKey(**args) @@ -472,7 +520,7 @@ def unmarshal_User(data: Any) -> User: if field is not None: args["deletable"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -483,6 +531,8 @@ def unmarshal_User(data: Any) -> User: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("mfa", None) if field is not None: @@ -499,16 +549,22 @@ def 
unmarshal_User(data: Any) -> User: field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("last_login_at", None) if field is not None: args["last_login_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_login_at"] = None field = data.get("two_factor_enabled", None) if field is not None: args["two_factor_enabled"] = field + else: + args["two_factor_enabled"] = None return User(**args) @@ -532,6 +588,8 @@ def unmarshal_EncodedJWT(data: Any) -> EncodedJWT: field = data.get("jwt", None) if field is not None: args["jwt"] = unmarshal_JWT(field) + else: + args["jwt"] = None return EncodedJWT(**args) @@ -664,6 +722,8 @@ def unmarshal_PermissionSet(data: Any) -> PermissionSet: field = data.get("categories", None) if field is not None: args["categories"] = field + else: + args["categories"] = None return PermissionSet(**args) @@ -750,18 +810,26 @@ def unmarshal_Rule(data: Any) -> Rule: field = data.get("permission_set_names", None) if field is not None: args["permission_set_names"] = field + else: + args["permission_set_names"] = None field = data.get("project_ids", None) if field is not None: args["project_ids"] = field + else: + args["project_ids"] = None field = data.get("organization_id", None) if field is not None: args["organization_id"] = field + else: + args["organization_id"] = None field = data.get("account_root_user_id", None) if field is not None: args["account_root_user_id"] = field + else: + args["account_root_user_id"] = None return Rule(**args) diff --git a/scaleway/scaleway/instance/v1/marshalling.py b/scaleway/scaleway/instance/v1/marshalling.py index e5fc59e05..24987fab2 100644 --- a/scaleway/scaleway/instance/v1/marshalling.py +++ b/scaleway/scaleway/instance/v1/marshalling.py @@ -294,18 +294,24 @@ def unmarshal_Volume(data: Any) -> Volume: field = data.get("export_uri", None) if field is not None: args["export_uri"] = field + else: + args["export_uri"] = None field = data.get("creation_date", None) if field is not None: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("tags", None) if field is not None: @@ -322,6 +328,8 @@ def unmarshal_Volume(data: Any) -> Volume: field = data.get("server", None) if field is not None: args["server"] = unmarshal_ServerSummary(field) + else: + args["server"] = None return Volume(**args) @@ -394,16 +402,22 @@ def unmarshal_Image(data: Any) -> Image: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("default_bootscript", None) if field is not None: args["default_bootscript"] = unmarshal_Bootscript(field) + else: + args["default_bootscript"] = None field = data.get("public", None) if field is not None: @@ -428,6 +442,8 @@ def unmarshal_Image(data: Any) -> Image: field = data.get("root_volume", None) if field is not None: args["root_volume"] = unmarshal_VolumeSummary(field) + else: + 
args["root_volume"] = None return Image(**args) @@ -618,6 +634,8 @@ def unmarshal_ServerMaintenance(data: Any) -> ServerMaintenance: field = data.get("start_date", None) if field is not None: args["start_date"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["start_date"] = None return ServerMaintenance(**args) @@ -653,6 +671,8 @@ def unmarshal_VolumeServer(data: Any) -> VolumeServer: field = data.get("server", None) if field is not None: args["server"] = unmarshal_ServerSummary(field) + else: + args["server"] = None field = data.get("volume_type", None) if field is not None: @@ -679,12 +699,16 @@ def unmarshal_VolumeServer(data: Any) -> VolumeServer: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None return VolumeServer(**args) @@ -732,6 +756,8 @@ def unmarshal_Server(data: Any) -> Server: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("dynamic_ip_required", None) if field is not None: @@ -752,18 +778,26 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("enable_ipv6", None) if field is not None: args["enable_ipv6"] = field + else: + args["enable_ipv6"] = None field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None field = data.get("private_ip", None) if field is not None: args["private_ip"] = field + else: + args["private_ip"] = None field = data.get("public_ip", None) if field is not None: args["public_ip"] = unmarshal_ServerIp(field) + else: + args["public_ip"] = None field = data.get("public_ips", None) if field is not None: @@ -796,22 +830,32 @@ def unmarshal_Server(data: Any) -> Server: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("location", None) if field is not None: args["location"] = unmarshal_ServerLocation(field) + else: + args["location"] = None field = data.get("ipv6", None) if field is not None: args["ipv6"] = unmarshal_ServerIpv6(field) + else: + args["ipv6"] = None field = data.get("bootscript", None) if field is not None: args["bootscript"] = unmarshal_Bootscript(field) + else: + args["bootscript"] = None field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroupSummary(field) + else: + args["security_group"] = None field = data.get("maintenances", None) if field is not None: @@ -842,6 +886,8 @@ def unmarshal_Server(data: Any) -> Server: field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return Server(**args) @@ -857,6 +903,8 @@ def unmarshal_AttachServerVolumeResponse(data: Any) -> AttachServerVolumeRespons field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return AttachServerVolumeResponse(**args) @@ -872,6 +920,8 @@ def unmarshal_CreateImageResponse(data: Any) -> CreateImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return CreateImageResponse(**args) @@ -904,7 
+954,7 @@ def unmarshal_Ip(data: Any) -> Ip: if field is not None: args["project"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -927,10 +977,14 @@ def unmarshal_Ip(data: Any) -> Ip: field = data.get("reverse", None) if field is not None: args["reverse"] = field + else: + args["reverse"] = None field = data.get("server", None) if field is not None: args["server"] = unmarshal_ServerSummary(field) + else: + args["server"] = None return Ip(**args) @@ -946,6 +1000,8 @@ def unmarshal_CreateIpResponse(data: Any) -> CreateIpResponse: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_Ip(field) + else: + args["ip"] = None return CreateIpResponse(**args) @@ -961,6 +1017,8 @@ def unmarshal_CreatePlacementGroupResponse(data: Any) -> CreatePlacementGroupRes field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return CreatePlacementGroupResponse(**args) @@ -976,6 +1034,8 @@ def unmarshal_CreatePrivateNICResponse(data: Any) -> CreatePrivateNICResponse: field = data.get("private_nic", None) if field is not None: args["private_nic"] = unmarshal_PrivateNIC(field) + else: + args["private_nic"] = None return CreatePrivateNICResponse(**args) @@ -1049,18 +1109,24 @@ def unmarshal_SecurityGroup(data: Any) -> SecurityGroup: field = data.get("organization_default", None) if field is not None: args["organization_default"] = field + else: + args["organization_default"] = None field = data.get("creation_date", None) if field is not None: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None return SecurityGroup(**args) @@ -1076,6 +1142,8 @@ def unmarshal_CreateSecurityGroupResponse(data: Any) -> CreateSecurityGroupRespo field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return CreateSecurityGroupResponse(**args) @@ -1123,10 +1191,14 @@ def unmarshal_SecurityGroupRule(data: Any) -> SecurityGroupRule: field = data.get("dest_port_from", None) if field is not None: args["dest_port_from"] = field + else: + args["dest_port_from"] = None field = data.get("dest_port_to", None) if field is not None: args["dest_port_to"] = field + else: + args["dest_port_to"] = None return SecurityGroupRule(**args) @@ -1144,6 +1216,8 @@ def unmarshal_CreateSecurityGroupRuleResponse( field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) + else: + args["rule"] = None return CreateSecurityGroupRuleResponse(**args) @@ -1159,6 +1233,8 @@ def unmarshal_CreateServerResponse(data: Any) -> CreateServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return CreateServerResponse(**args) @@ -1229,22 +1305,30 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: field = data.get("base_volume", None) if field is not None: args["base_volume"] = unmarshal_SnapshotBaseVolume(field) + else: + args["base_volume"] = None field = data.get("creation_date", None) if field is not None: args["creation_date"] = ( parser.isoparse(field) if isinstance(field, 
str) else field ) + else: + args["creation_date"] = None field = data.get("modification_date", None) if field is not None: args["modification_date"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["modification_date"] = None field = data.get("error_reason", None) if field is not None: args["error_reason"] = field + else: + args["error_reason"] = None return Snapshot(**args) @@ -1288,12 +1372,16 @@ def unmarshal_Task(data: Any) -> Task: field = data.get("started_at", None) if field is not None: args["started_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["started_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None return Task(**args) @@ -1309,10 +1397,14 @@ def unmarshal_CreateSnapshotResponse(data: Any) -> CreateSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + args["snapshot"] = None field = data.get("task", None) if field is not None: args["task"] = unmarshal_Task(field) + else: + args["task"] = None return CreateSnapshotResponse(**args) @@ -1328,6 +1420,8 @@ def unmarshal_CreateVolumeResponse(data: Any) -> CreateVolumeResponse: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return CreateVolumeResponse(**args) @@ -1343,6 +1437,8 @@ def unmarshal_DetachServerVolumeResponse(data: Any) -> DetachServerVolumeRespons field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return DetachServerVolumeResponse(**args) @@ -1358,6 +1454,8 @@ def unmarshal_ExportSnapshotResponse(data: Any) -> ExportSnapshotResponse: field = data.get("task", None) if field is not None: args["task"] = unmarshal_Task(field) + else: + args["task"] = None return ExportSnapshotResponse(**args) @@ -1373,6 +1471,8 @@ def unmarshal_GetBootscriptResponse(data: Any) -> GetBootscriptResponse: field = data.get("bootscript", None) if field is not None: args["bootscript"] = unmarshal_Bootscript(field) + else: + args["bootscript"] = None return GetBootscriptResponse(**args) @@ -1459,6 +1559,8 @@ def unmarshal_GetDashboardResponse(data: Any) -> GetDashboardResponse: field = data.get("dashboard", None) if field is not None: args["dashboard"] = unmarshal_Dashboard(field) + else: + args["dashboard"] = None return GetDashboardResponse(**args) @@ -1474,6 +1576,8 @@ def unmarshal_GetImageResponse(data: Any) -> GetImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return GetImageResponse(**args) @@ -1489,6 +1593,8 @@ def unmarshal_GetIpResponse(data: Any) -> GetIpResponse: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_Ip(field) + else: + args["ip"] = None return GetIpResponse(**args) @@ -1504,6 +1610,8 @@ def unmarshal_GetPlacementGroupResponse(data: Any) -> GetPlacementGroupResponse: field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return GetPlacementGroupResponse(**args) @@ -1563,6 +1671,8 @@ def unmarshal_GetPrivateNICResponse(data: Any) -> GetPrivateNICResponse: field = data.get("private_nic", None) if field is not None: args["private_nic"] = unmarshal_PrivateNIC(field) 
+ else: + args["private_nic"] = None return GetPrivateNICResponse(**args) @@ -1578,6 +1688,8 @@ def unmarshal_GetSecurityGroupResponse(data: Any) -> GetSecurityGroupResponse: field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return GetSecurityGroupResponse(**args) @@ -1593,6 +1705,8 @@ def unmarshal_GetSecurityGroupRuleResponse(data: Any) -> GetSecurityGroupRuleRes field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) + else: + args["rule"] = None return GetSecurityGroupRuleResponse(**args) @@ -1608,6 +1722,8 @@ def unmarshal_GetServerResponse(data: Any) -> GetServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return GetServerResponse(**args) @@ -1668,6 +1784,8 @@ def unmarshal_GetSnapshotResponse(data: Any) -> GetSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + args["snapshot"] = None return GetSnapshotResponse(**args) @@ -1683,6 +1801,8 @@ def unmarshal_GetVolumeResponse(data: Any) -> GetVolumeResponse: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return GetVolumeResponse(**args) @@ -1900,10 +2020,14 @@ def unmarshal_ServerTypeNetworkInterface(data: Any) -> ServerTypeNetworkInterfac field = data.get("internal_bandwidth", None) if field is not None: args["internal_bandwidth"] = field + else: + args["internal_bandwidth"] = None field = data.get("internet_bandwidth", None) if field is not None: args["internet_bandwidth"] = field + else: + args["internet_bandwidth"] = None return ServerTypeNetworkInterface(**args) @@ -1944,6 +2068,8 @@ def unmarshal_ServerTypeCapabilities(data: Any) -> ServerTypeCapabilities: field = data.get("block_storage", None) if field is not None: args["block_storage"] = field + else: + args["block_storage"] = None return ServerTypeCapabilities(**args) @@ -1971,10 +2097,14 @@ def unmarshal_ServerTypeNetwork(data: Any) -> ServerTypeNetwork: field = data.get("sum_internal_bandwidth", None) if field is not None: args["sum_internal_bandwidth"] = field + else: + args["sum_internal_bandwidth"] = None field = data.get("sum_internet_bandwidth", None) if field is not None: args["sum_internet_bandwidth"] = field + else: + args["sum_internet_bandwidth"] = None return ServerTypeNetwork(**args) @@ -1992,6 +2122,8 @@ def unmarshal_ServerTypeVolumeConstraintsByType( field = data.get("l_ssd", None) if field is not None: args["l_ssd"] = unmarshal_ServerTypeVolumeConstraintSizes(field) + else: + args["l_ssd"] = None return ServerTypeVolumeConstraintsByType(**args) @@ -2007,6 +2139,8 @@ def unmarshal_ServerType(data: Any) -> ServerType: field = data.get("monthly_price", None) if field is not None: args["monthly_price"] = field + else: + args["monthly_price"] = None field = data.get("hourly_price", None) if field is not None: @@ -2037,26 +2171,38 @@ def unmarshal_ServerType(data: Any) -> ServerType: args["per_volume_constraint"] = unmarshal_ServerTypeVolumeConstraintsByType( field ) + else: + args["per_volume_constraint"] = None field = data.get("volumes_constraint", None) if field is not None: args["volumes_constraint"] = unmarshal_ServerTypeVolumeConstraintSizes(field) + else: + args["volumes_constraint"] = None field = data.get("gpu", None) if field is not None: args["gpu"] = 
field + else: + args["gpu"] = None field = data.get("network", None) if field is not None: args["network"] = unmarshal_ServerTypeNetwork(field) + else: + args["network"] = None field = data.get("capabilities", None) if field is not None: args["capabilities"] = unmarshal_ServerTypeCapabilities(field) + else: + args["capabilities"] = None field = data.get("scratch_storage_max_size", None) if field is not None: args["scratch_storage_max_size"] = field + else: + args["scratch_storage_max_size"] = None return ServerType(**args) @@ -2175,10 +2321,14 @@ def unmarshal_VolumeType(data: Any) -> VolumeType: field = data.get("capabilities", None) if field is not None: args["capabilities"] = unmarshal_VolumeTypeCapabilities(field) + else: + args["capabilities"] = None field = data.get("constraints", None) if field is not None: args["constraints"] = unmarshal_VolumeTypeConstraints(field) + else: + args["constraints"] = None return VolumeType(**args) @@ -2227,6 +2377,8 @@ def unmarshal_MigrationPlan(data: Any) -> MigrationPlan: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return MigrationPlan(**args) @@ -2242,6 +2394,8 @@ def unmarshal_ServerActionResponse(data: Any) -> ServerActionResponse: field = data.get("task", None) if field is not None: args["task"] = unmarshal_Task(field) + else: + args["task"] = None return ServerActionResponse(**args) @@ -2257,6 +2411,8 @@ def unmarshal_SetPlacementGroupResponse(data: Any) -> SetPlacementGroupResponse: field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return SetPlacementGroupResponse(**args) @@ -2312,6 +2468,8 @@ def unmarshal_UpdateImageResponse(data: Any) -> UpdateImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return UpdateImageResponse(**args) @@ -2327,6 +2485,8 @@ def unmarshal_UpdateIpResponse(data: Any) -> UpdateIpResponse: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_Ip(field) + else: + args["ip"] = None return UpdateIpResponse(**args) @@ -2342,6 +2502,8 @@ def unmarshal_UpdatePlacementGroupResponse(data: Any) -> UpdatePlacementGroupRes field = data.get("placement_group", None) if field is not None: args["placement_group"] = unmarshal_PlacementGroup(field) + else: + args["placement_group"] = None return UpdatePlacementGroupResponse(**args) @@ -2378,6 +2540,8 @@ def unmarshal_UpdateSecurityGroupResponse(data: Any) -> UpdateSecurityGroupRespo field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return UpdateSecurityGroupResponse(**args) @@ -2395,6 +2559,8 @@ def unmarshal_UpdateSecurityGroupRuleResponse( field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) + else: + args["rule"] = None return UpdateSecurityGroupRuleResponse(**args) @@ -2410,6 +2576,8 @@ def unmarshal_UpdateServerResponse(data: Any) -> UpdateServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return UpdateServerResponse(**args) @@ -2425,6 +2593,8 @@ def unmarshal_UpdateSnapshotResponse(data: Any) -> UpdateSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + 
args["snapshot"] = None return UpdateSnapshotResponse(**args) @@ -2440,6 +2610,8 @@ def unmarshal_UpdateVolumeResponse(data: Any) -> UpdateVolumeResponse: field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None return UpdateVolumeResponse(**args) @@ -2455,6 +2627,8 @@ def unmarshal__SetImageResponse(data: Any) -> _SetImageResponse: field = data.get("image", None) if field is not None: args["image"] = unmarshal_Image(field) + else: + args["image"] = None return _SetImageResponse(**args) @@ -2470,6 +2644,8 @@ def unmarshal__SetSecurityGroupResponse(data: Any) -> _SetSecurityGroupResponse: field = data.get("security_group", None) if field is not None: args["security_group"] = unmarshal_SecurityGroup(field) + else: + args["security_group"] = None return _SetSecurityGroupResponse(**args) @@ -2485,6 +2661,8 @@ def unmarshal__SetSecurityGroupRuleResponse(data: Any) -> _SetSecurityGroupRuleR field = data.get("rule", None) if field is not None: args["rule"] = unmarshal_SecurityGroupRule(field) + else: + args["rule"] = None return _SetSecurityGroupRuleResponse(**args) @@ -2500,6 +2678,8 @@ def unmarshal__SetServerResponse(data: Any) -> _SetServerResponse: field = data.get("server", None) if field is not None: args["server"] = unmarshal_Server(field) + else: + args["server"] = None return _SetServerResponse(**args) @@ -2515,6 +2695,8 @@ def unmarshal__SetSnapshotResponse(data: Any) -> _SetSnapshotResponse: field = data.get("snapshot", None) if field is not None: args["snapshot"] = unmarshal_Snapshot(field) + else: + args["snapshot"] = None return _SetSnapshotResponse(**args) diff --git a/scaleway/scaleway/iot/v1/marshalling.py b/scaleway/scaleway/iot/v1/marshalling.py index 97f75d2c5..0397cfe72 100644 --- a/scaleway/scaleway/iot/v1/marshalling.py +++ b/scaleway/scaleway/iot/v1/marshalling.py @@ -75,6 +75,8 @@ def unmarshal_DeviceMessageFiltersRule(data: Any) -> DeviceMessageFiltersRule: field = data.get("topics", None) if field is not None: args["topics"] = field + else: + args["topics"] = None return DeviceMessageFiltersRule(**args) @@ -90,10 +92,14 @@ def unmarshal_DeviceMessageFilters(data: Any) -> DeviceMessageFilters: field = data.get("publish", None) if field is not None: args["publish"] = unmarshal_DeviceMessageFiltersRule(field) + else: + args["publish"] = None field = data.get("subscribe", None) if field is not None: args["subscribe"] = unmarshal_DeviceMessageFiltersRule(field) + else: + args["subscribe"] = None return DeviceMessageFilters(**args) @@ -131,6 +137,8 @@ def unmarshal_Device(data: Any) -> Device: args["last_activity_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_activity_at"] = None field = data.get("is_connected", None) if field is not None: @@ -151,14 +159,20 @@ def unmarshal_Device(data: Any) -> Device: field = data.get("message_filters", None) if field is not None: args["message_filters"] = unmarshal_DeviceMessageFilters(field) + else: + args["message_filters"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Device(**args) @@ -179,7 +193,7 @@ def unmarshal_Network(data: Any) -> Network: if field is not None: args["name"] = field - field = 
data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -198,6 +212,8 @@ def unmarshal_Network(data: Any) -> Network: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Network(**args) @@ -288,14 +304,20 @@ def unmarshal_Hub(data: Any) -> Hub: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("twins_graphite_config", None) if field is not None: args["twins_graphite_config"] = unmarshal_HubTwinsGraphiteConfig(field) + else: + args["twins_graphite_config"] = None return Hub(**args) @@ -330,10 +352,14 @@ def unmarshal_CreateDeviceResponse(data: Any) -> CreateDeviceResponse: field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None field = data.get("certificate", None) if field is not None: args["certificate"] = unmarshal_Certificate(field) + else: + args["certificate"] = None return CreateDeviceResponse(**args) @@ -353,6 +379,8 @@ def unmarshal_CreateNetworkResponse(data: Any) -> CreateNetworkResponse: field = data.get("network", None) if field is not None: args["network"] = unmarshal_Network(field) + else: + args["network"] = None return CreateNetworkResponse(**args) @@ -372,6 +400,8 @@ def unmarshal_GetDeviceCertificateResponse(data: Any) -> GetDeviceCertificateRes field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None return GetDeviceCertificateResponse(**args) @@ -510,17 +540,21 @@ def unmarshal_RouteSummary(data: Any) -> RouteSummary: if field is not None: args["topic"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return RouteSummary(**args) @@ -595,10 +629,14 @@ def unmarshal_RenewDeviceCertificateResponse( field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None field = data.get("certificate", None) if field is not None: args["certificate"] = unmarshal_Certificate(field) + else: + args["certificate"] = None return RenewDeviceCertificateResponse(**args) @@ -716,29 +754,39 @@ def unmarshal_Route(data: Any) -> Route: if field is not None: args["topic"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("s3_config", None) if field is not None: args["s3_config"] = unmarshal_RouteS3Config(field) + else: + args["s3_config"] = None field = data.get("db_config", None) if field is not None: args["db_config"] = 
unmarshal_RouteDatabaseConfig(field) + else: + args["db_config"] = None field = data.get("rest_config", None) if field is not None: args["rest_config"] = unmarshal_RouteRestConfig(field) + else: + args["rest_config"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Route(**args) @@ -758,6 +806,8 @@ def unmarshal_SetDeviceCertificateResponse(data: Any) -> SetDeviceCertificateRes field = data.get("device", None) if field is not None: args["device"] = unmarshal_Device(field) + else: + args["device"] = None return SetDeviceCertificateResponse(**args) @@ -785,6 +835,8 @@ def unmarshal_TwinDocument(data: Any) -> TwinDocument: field = data.get("data", None) if field is not None: args["data"] = field + else: + args["data"] = None return TwinDocument(**args) diff --git a/scaleway/scaleway/jobs/v1alpha1/marshalling.py b/scaleway/scaleway/jobs/v1alpha1/marshalling.py index ecb70454a..71b680b67 100644 --- a/scaleway/scaleway/jobs/v1alpha1/marshalling.py +++ b/scaleway/scaleway/jobs/v1alpha1/marshalling.py @@ -78,10 +78,14 @@ def unmarshal_JobDefinition(data: Any) -> JobDefinition: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("environment_variables", None) if field is not None: @@ -102,10 +106,14 @@ def unmarshal_JobDefinition(data: Any) -> JobDefinition: field = data.get("job_timeout", None) if field is not None: args["job_timeout"] = field + else: + args["job_timeout"] = None field = data.get("cron_schedule", None) if field is not None: args["cron_schedule"] = unmarshal_CronSchedule(field) + else: + args["cron_schedule"] = None return JobDefinition(**args) @@ -149,24 +157,34 @@ def unmarshal_JobRun(data: Any) -> JobRun: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("terminated_at", None) if field is not None: args["terminated_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["terminated_at"] = None field = data.get("exit_code", None) if field is not None: args["exit_code"] = field + else: + args["exit_code"] = None field = data.get("run_duration", None) if field is not None: args["run_duration"] = field + else: + args["run_duration"] = None field = data.get("environment_variables", None) if field is not None: @@ -183,6 +201,8 @@ def unmarshal_JobRun(data: Any) -> JobRun: field = data.get("started_at", None) if field is not None: args["started_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["started_at"] = None return JobRun(**args) diff --git a/scaleway/scaleway/k8s/v1/marshalling.py b/scaleway/scaleway/k8s/v1/marshalling.py index 9f324c14b..8f813da49 100644 --- a/scaleway/scaleway/k8s/v1/marshalling.py +++ b/scaleway/scaleway/k8s/v1/marshalling.py @@ -114,10 +114,14 @@ def unmarshal_Pool(data: Any) -> Pool: field = data.get("created_at", None) if field is 
not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("min_size", None) if field is not None: @@ -162,14 +166,20 @@ def unmarshal_Pool(data: Any) -> Pool: field = data.get("placement_group_id", None) if field is not None: args["placement_group_id"] = field + else: + args["placement_group_id"] = None field = data.get("upgrade_policy", None) if field is not None: args["upgrade_policy"] = unmarshal_PoolUpgradePolicy(field) + else: + args["upgrade_policy"] = None field = data.get("root_volume_size", None) if field is not None: args["root_volume_size"] = field + else: + args["root_volume_size"] = None return Pool(**args) @@ -253,6 +263,8 @@ def unmarshal_ClusterAutoUpgrade(data: Any) -> ClusterAutoUpgrade: field = data.get("maintenance_window", None) if field is not None: args["maintenance_window"] = unmarshal_MaintenanceWindow(field) + else: + args["maintenance_window"] = None return ClusterAutoUpgrade(**args) @@ -359,7 +371,7 @@ def unmarshal_Cluster(data: Any) -> Cluster: if field is not None: args["id"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -426,32 +438,52 @@ def unmarshal_Cluster(data: Any) -> Cluster: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("autoscaler_config", None) if field is not None: args["autoscaler_config"] = unmarshal_ClusterAutoscalerConfig(field) + else: + args["autoscaler_config"] = None field = data.get("auto_upgrade", None) if field is not None: args["auto_upgrade"] = unmarshal_ClusterAutoUpgrade(field) + else: + args["auto_upgrade"] = None field = data.get("open_id_connect_config", None) if field is not None: args["open_id_connect_config"] = unmarshal_ClusterOpenIDConnectConfig(field) + else: + args["open_id_connect_config"] = None field = data.get("private_network_id", None) if field is not None: args["private_network_id"] = field + else: + args["private_network_id"] = None field = data.get("commitment_ends_at", None) if field is not None: args["commitment_ends_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["commitment_ends_at"] = None + + field = data.get("routed_ip_enabled", None) + if field is not None: + args["routed_ip_enabled"] = field + else: + args["routed_ip_enabled"] = None field = data.get("routed_ip_enabled", None) if field is not None: @@ -495,14 +527,20 @@ def unmarshal_Node(data: Any) -> Node: field = data.get("public_ip_v4", None) if field is not None: args["public_ip_v4"] = field + else: + args["public_ip_v4"] = None field = data.get("public_ip_v6", None) if field is not None: args["public_ip_v6"] = field + else: + args["public_ip_v6"] = None field = data.get("conditions", None) if field is not None: args["conditions"] = field + else: + args["conditions"] = None field = data.get("status", None) if field is not None: @@ -511,14 +549,20 @@ def unmarshal_Node(data: Any) -> Node: field = data.get("error_message", None) if field is not None: 
args["error_message"] = field + else: + args["error_message"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Node(**args) @@ -656,6 +700,8 @@ def unmarshal_ClusterType(data: Any) -> ClusterType: field = data.get("commitment_delay", None) if field is not None: args["commitment_delay"] = field + else: + args["commitment_delay"] = None return ClusterType(**args) diff --git a/scaleway/scaleway/lb/v1/marshalling.py b/scaleway/scaleway/lb/v1/marshalling.py index b68ee34b9..f339251e9 100644 --- a/scaleway/scaleway/lb/v1/marshalling.py +++ b/scaleway/scaleway/lb/v1/marshalling.py @@ -140,10 +140,14 @@ def unmarshal_Ip(data: Any) -> Ip: field = data.get("lb_id", None) if field is not None: args["lb_id"] = field + else: + args["lb_id"] = None field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return Ip(**args) @@ -197,10 +201,14 @@ def unmarshal_Subscriber(data: Any) -> Subscriber: field = data.get("email_config", None) if field is not None: args["email_config"] = unmarshal_SubscriberEmailConfig(field) + else: + args["email_config"] = None field = data.get("webhook_config", None) if field is not None: args["webhook_config"] = unmarshal_SubscriberWebhookConfig(field) + else: + args["webhook_config"] = None return Subscriber(**args) @@ -228,6 +236,8 @@ def unmarshal_HealthCheckHttpConfig(data: Any) -> HealthCheckHttpConfig: field = data.get("code", None) if field is not None: args["code"] = field + else: + args["code"] = None return HealthCheckHttpConfig(**args) @@ -259,6 +269,8 @@ def unmarshal_HealthCheckHttpsConfig(data: Any) -> HealthCheckHttpsConfig: field = data.get("code", None) if field is not None: args["code"] = field + else: + args["code"] = None return HealthCheckHttpsConfig(**args) @@ -345,18 +357,26 @@ def unmarshal_HealthCheck(data: Any) -> HealthCheck: field = data.get("check_delay", None) if field is not None: args["check_delay"] = field + else: + args["check_delay"] = None field = data.get("check_timeout", None) if field is not None: args["check_timeout"] = field + else: + args["check_timeout"] = None field = data.get("tcp_config", None) if field is not None: args["tcp_config"] = unmarshal_HealthCheckTcpConfig(field) + else: + args["tcp_config"] = None field = data.get("mysql_config", None) if field is not None: args["mysql_config"] = unmarshal_HealthCheckMysqlConfig(field) + else: + args["mysql_config"] = None field = data.get("check_send_proxy", None) if field is not None: @@ -365,26 +385,38 @@ def unmarshal_HealthCheck(data: Any) -> HealthCheck: field = data.get("pgsql_config", None) if field is not None: args["pgsql_config"] = unmarshal_HealthCheckPgsqlConfig(field) + else: + args["pgsql_config"] = None field = data.get("ldap_config", None) if field is not None: args["ldap_config"] = unmarshal_HealthCheckLdapConfig(field) + else: + args["ldap_config"] = None field = data.get("redis_config", None) if field is not None: args["redis_config"] = unmarshal_HealthCheckRedisConfig(field) + else: + args["redis_config"] = None field = data.get("http_config", None) if field is not None: args["http_config"] = unmarshal_HealthCheckHttpConfig(field) + else: + args["http_config"] = None field = data.get("https_config", 
None) if field is not None: args["https_config"] = unmarshal_HealthCheckHttpsConfig(field) + else: + args["https_config"] = None field = data.get("transient_check_delay", None) if field is not None: args["transient_check_delay"] = field + else: + args["transient_check_delay"] = None return HealthCheck(**args) @@ -416,14 +448,20 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return Instance(**args) @@ -482,7 +520,7 @@ def unmarshal_Lb(data: Any) -> Lb: if field is not None: args["backend_count"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -505,18 +543,26 @@ def unmarshal_Lb(data: Any) -> Lb: field = data.get("subscriber", None) if field is not None: args["subscriber"] = unmarshal_Subscriber(field) + else: + args["subscriber"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return Lb(**args) @@ -572,62 +618,92 @@ def unmarshal_Backend(data: Any) -> Backend: field = data.get("health_check", None) if field is not None: args["health_check"] = unmarshal_HealthCheck(field) + else: + args["health_check"] = None field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("send_proxy_v2", None) if field is not None: args["send_proxy_v2"] = field + else: + args["send_proxy_v2"] = None field = data.get("timeout_server", None) if field is not None: args["timeout_server"] = field + else: + args["timeout_server"] = None field = data.get("timeout_connect", None) if field is not None: args["timeout_connect"] = field + else: + args["timeout_connect"] = None field = data.get("timeout_tunnel", None) if field is not None: args["timeout_tunnel"] = field + else: + args["timeout_tunnel"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("failover_host", None) if field is not None: args["failover_host"] = field + else: + args["failover_host"] = None field = data.get("ssl_bridging", None) if field is not None: args["ssl_bridging"] = field + else: + args["ssl_bridging"] = None field = data.get("ignore_ssl_server_verify", None) if field is not None: args["ignore_ssl_server_verify"] = field + else: + args["ignore_ssl_server_verify"] = None field = data.get("redispatch_attempt_count", None) if field is not None: args["redispatch_attempt_count"] = field + else: + 
args["redispatch_attempt_count"] = None field = data.get("max_retries", None) if field is not None: args["max_retries"] = field + else: + args["max_retries"] = None field = data.get("max_connections", None) if field is not None: args["max_connections"] = field + else: + args["max_connections"] = None field = data.get("timeout_queue", None) if field is not None: args["timeout_queue"] = field + else: + args["timeout_queue"] = None return Backend(**args) @@ -640,7 +716,7 @@ def unmarshal_Certificate(data: Any) -> Certificate: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -669,16 +745,22 @@ def unmarshal_Certificate(data: Any) -> Certificate: args["not_valid_before"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["not_valid_before"] = None field = data.get("not_valid_after", None) if field is not None: args["not_valid_after"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["not_valid_after"] = None field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("name", None) if field is not None: @@ -687,14 +769,20 @@ def unmarshal_Certificate(data: Any) -> Certificate: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("status_details", None) if field is not None: args["status_details"] = field + else: + args["status_details"] = None return Certificate(**args) @@ -730,26 +818,38 @@ def unmarshal_Frontend(data: Any) -> Frontend: field = data.get("backend", None) if field is not None: args["backend"] = unmarshal_Backend(field) + else: + args["backend"] = None field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("timeout_client", None) if field is not None: args["timeout_client"] = field + else: + args["timeout_client"] = None field = data.get("certificate", None) if field is not None: args["certificate"] = unmarshal_Certificate(field) + else: + args["certificate"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Frontend(**args) @@ -762,7 +862,7 @@ def unmarshal_AclActionRedirect(data: Any) -> AclActionRedirect: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -773,6 +873,8 @@ def unmarshal_AclActionRedirect(data: Any) -> AclActionRedirect: field = data.get("code", None) if field is not None: args["code"] = field + else: + args["code"] = None return AclActionRedirect(**args) @@ -785,13 +887,15 @@ def unmarshal_AclAction(data: Any) -> AclAction: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("redirect", None) if field is not None: args["redirect"] = 
unmarshal_AclActionRedirect(field) + else: + args["redirect"] = None return AclAction(**args) @@ -823,6 +927,8 @@ def unmarshal_AclMatch(data: Any) -> AclMatch: field = data.get("http_filter_option", None) if field is not None: args["http_filter_option"] = field + else: + args["http_filter_option"] = None return AclMatch(**args) @@ -854,22 +960,32 @@ def unmarshal_Acl(data: Any) -> Acl: field = data.get("match", None) if field is not None: args["match"] = unmarshal_AclMatch(field) + else: + args["match"] = None field = data.get("action", None) if field is not None: args["action"] = unmarshal_AclAction(field) + else: + args["action"] = None field = data.get("frontend", None) if field is not None: args["frontend"] = unmarshal_Frontend(field) + else: + args["frontend"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Acl(**args) @@ -885,6 +1001,8 @@ def unmarshal_PrivateNetworkDHCPConfig(data: Any) -> PrivateNetworkDHCPConfig: field = data.get("ip_id", None) if field is not None: args["ip_id"] = field + else: + args["ip_id"] = None return PrivateNetworkDHCPConfig(**args) @@ -911,6 +1029,8 @@ def unmarshal_PrivateNetworkStaticConfig(data: Any) -> PrivateNetworkStaticConfi field = data.get("ip_address", None) if field is not None: args["ip_address"] = field + else: + args["ip_address"] = None return PrivateNetworkStaticConfig(**args) @@ -938,26 +1058,38 @@ def unmarshal_PrivateNetwork(data: Any) -> PrivateNetwork: field = data.get("lb", None) if field is not None: args["lb"] = unmarshal_Lb(field) + else: + args["lb"] = None field = data.get("static_config", None) if field is not None: args["static_config"] = unmarshal_PrivateNetworkStaticConfig(field) + else: + args["static_config"] = None field = data.get("dhcp_config", None) if field is not None: args["dhcp_config"] = unmarshal_PrivateNetworkDHCPConfig(field) + else: + args["dhcp_config"] = None field = data.get("ipam_config", None) if field is not None: args["ipam_config"] = unmarshal_PrivateNetworkIpamConfig(field) + else: + args["ipam_config"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return PrivateNetwork(**args) @@ -973,10 +1105,14 @@ def unmarshal_RouteMatch(data: Any) -> RouteMatch: field = data.get("sni", None) if field is not None: args["sni"] = field + else: + args["sni"] = None field = data.get("host_header", None) if field is not None: args["host_header"] = field + else: + args["host_header"] = None return RouteMatch(**args) @@ -1004,14 +1140,20 @@ def unmarshal_Route(data: Any) -> Route: field = data.get("match", None) if field is not None: args["match"] = unmarshal_RouteMatch(field) + else: + args["match"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, 
str) else field + else: + args["updated_at"] = None return Route(**args) @@ -1049,6 +1191,8 @@ def unmarshal_BackendServerStats(data: Any) -> BackendServerStats: args["server_state_changed_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["server_state_changed_at"] = None return BackendServerStats(**args) @@ -1244,6 +1388,8 @@ def unmarshal_LbType(data: Any) -> LbType: field = data.get("region", None) if field is not None: args["region"] = field + else: + args["region"] = None return LbType(**args) diff --git a/scaleway/scaleway/llm_inference/v1beta1/marshalling.py b/scaleway/scaleway/llm_inference/v1beta1/marshalling.py index e81465983..5fa39a3ad 100644 --- a/scaleway/scaleway/llm_inference/v1beta1/marshalling.py +++ b/scaleway/scaleway/llm_inference/v1beta1/marshalling.py @@ -87,10 +87,14 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("public_access", None) if field is not None: args["public_access"] = unmarshal_EndpointPublicAccessDetails(field) + else: + args["public_access"] = None field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None return Endpoint(**args) @@ -156,14 +160,20 @@ def unmarshal_Deployment(data: Any) -> Deployment: field = data.get("error_message", None) if field is not None: args["error_message"] = field + else: + args["error_message"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Deployment(**args) @@ -187,10 +197,14 @@ def unmarshal_ModelS3Model(data: Any) -> ModelS3Model: field = data.get("node_type", None) if field is not None: args["node_type"] = field + else: + args["node_type"] = None field = data.get("triton_server_version", None) if field is not None: args["triton_server_version"] = field + else: + args["triton_server_version"] = None return ModelS3Model(**args) @@ -250,14 +264,20 @@ def unmarshal_Model(data: Any) -> Model: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("s3_model", None) if field is not None: args["s3_model"] = unmarshal_ModelS3Model(field) + else: + args["s3_model"] = None return Model(**args) @@ -433,10 +453,14 @@ def unmarshal_NodeType(data: Any) -> NodeType: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return NodeType(**args) diff --git a/scaleway/scaleway/marketplace/v2/marshalling.py b/scaleway/scaleway/marketplace/v2/marshalling.py index eb495e085..c3c22a3e1 100644 --- a/scaleway/scaleway/marketplace/v2/marshalling.py +++ b/scaleway/scaleway/marketplace/v2/marshalling.py @@ -74,16 +74,22 @@ def unmarshal_Image(data: Any) -> 
Image: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("valid_until", None) if field is not None: args["valid_until"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["valid_until"] = None return Image(**args) @@ -116,7 +122,7 @@ def unmarshal_LocalImage(data: Any) -> LocalImage: if field is not None: args["label"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -142,16 +148,22 @@ def unmarshal_Version(data: Any) -> Version: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("published_at", None) if field is not None: args["published_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["published_at"] = None return Version(**args) diff --git a/scaleway/scaleway/mnq/v1beta1/marshalling.py b/scaleway/scaleway/mnq/v1beta1/marshalling.py index 1ffebec45..30fb4cffe 100644 --- a/scaleway/scaleway/mnq/v1beta1/marshalling.py +++ b/scaleway/scaleway/mnq/v1beta1/marshalling.py @@ -64,10 +64,14 @@ def unmarshal_NatsAccount(data: Any) -> NatsAccount: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return NatsAccount(**args) @@ -118,14 +122,20 @@ def unmarshal_NatsCredentials(data: Any) -> NatsCredentials: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("credentials", None) if field is not None: args["credentials"] = unmarshal_File(field) + else: + args["credentials"] = None return NatsCredentials(**args) @@ -141,14 +151,20 @@ def unmarshal_SnsPermissions(data: Any) -> SnsPermissions: field = data.get("can_publish", None) if field is not None: args["can_publish"] = field + else: + args["can_publish"] = None field = data.get("can_receive", None) if field is not None: args["can_receive"] = field + else: + args["can_receive"] = None field = data.get("can_manage", None) if field is not None: args["can_manage"] = field + else: + args["can_manage"] = None return SnsPermissions(**args) @@ -192,14 +208,20 @@ def unmarshal_SnsCredentials(data: Any) -> SnsCredentials: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = 
parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("permissions", None) if field is not None: args["permissions"] = unmarshal_SnsPermissions(field) + else: + args["permissions"] = None return SnsCredentials(**args) @@ -215,14 +237,20 @@ def unmarshal_SqsPermissions(data: Any) -> SqsPermissions: field = data.get("can_publish", None) if field is not None: args["can_publish"] = field + else: + args["can_publish"] = None field = data.get("can_receive", None) if field is not None: args["can_receive"] = field + else: + args["can_receive"] = None field = data.get("can_manage", None) if field is not None: args["can_manage"] = field + else: + args["can_manage"] = None return SqsPermissions(**args) @@ -266,14 +294,20 @@ def unmarshal_SqsCredentials(data: Any) -> SqsCredentials: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("permissions", None) if field is not None: args["permissions"] = unmarshal_SqsPermissions(field) + else: + args["permissions"] = None return SqsCredentials(**args) @@ -389,10 +423,14 @@ def unmarshal_SnsInfo(data: Any) -> SnsInfo: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return SnsInfo(**args) @@ -424,10 +462,14 @@ def unmarshal_SqsInfo(data: Any) -> SqsInfo: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return SqsInfo(**args) diff --git a/scaleway/scaleway/rdb/v1/marshalling.py b/scaleway/scaleway/rdb/v1/marshalling.py index 4d2440648..0401c6c32 100644 --- a/scaleway/scaleway/rdb/v1/marshalling.py +++ b/scaleway/scaleway/rdb/v1/marshalling.py @@ -166,26 +166,38 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("ip", None) if field is not None: args["ip"] = field + else: + args["ip"] = None field = data.get("name", None) if field is not None: args["name"] = field + else: + args["name"] = None field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None field = data.get("load_balancer", None) if field is not None: args["load_balancer"] = unmarshal_EndpointLoadBalancerDetails(field) + else: + args["load_balancer"] = None field = data.get("direct_access", None) if field is not None: args["direct_access"] = unmarshal_EndpointDirectAccessDetails(field) + else: + args["direct_access"] = None field = data.get("hostname", None) if field is not None: args["hostname"] = field + else: + args["hostname"] = None return Endpoint(**args) @@ -209,18 +221,26 @@ def unmarshal_Maintenance(data: Any) -> Maintenance: field = data.get("starts_at", None) if field is not None: 
args["starts_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["starts_at"] = None field = data.get("stops_at", None) if field is not None: args["stops_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["stops_at"] = None field = data.get("closed_at", None) if field is not None: args["closed_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["closed_at"] = None field = data.get("forced_at", None) if field is not None: args["forced_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["forced_at"] = None return Maintenance(**args) @@ -293,18 +313,26 @@ def unmarshal_DatabaseBackup(data: Any) -> DatabaseBackup: field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("instance_name", None) if field is not None: @@ -321,12 +349,16 @@ def unmarshal_DatabaseBackup(data: Any) -> DatabaseBackup: field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = data.get("download_url_expires_at", None) if field is not None: args["download_url_expires_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["download_url_expires_at"] = None return DatabaseBackup(**args) @@ -385,14 +417,20 @@ def unmarshal_InstanceLog(data: Any) -> InstanceLog: field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return InstanceLog(**args) @@ -422,6 +460,8 @@ def unmarshal_BackupSchedule(data: Any) -> BackupSchedule: args["next_run_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["next_run_at"] = None return BackupSchedule(**args) @@ -456,10 +496,14 @@ def unmarshal_LogsPolicy(data: Any) -> LogsPolicy: field = data.get("max_age_retention", None) if field is not None: args["max_age_retention"] = field + else: + args["max_age_retention"] = None field = data.get("total_disk_retention", None) if field is not None: args["total_disk_retention"] = field + else: + args["total_disk_retention"] = None return LogsPolicy(**args) @@ -499,7 +543,7 @@ def unmarshal_Volume(data: Any) -> Volume: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -507,7 +551,7 @@ def unmarshal_Volume(data: Any) -> Volume: if field is not None: args["size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -541,10 +585,14 @@ def 
unmarshal_Instance(data: Any) -> Instance: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("volume", None) if field is not None: args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None field = data.get("project_id", None) if field is not None: @@ -583,10 +631,14 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("endpoint", None) if field is not None: args["endpoint"] = unmarshal_Endpoint(field) + else: + args["endpoint"] = None field = data.get("backup_schedule", None) if field is not None: args["backup_schedule"] = unmarshal_BackupSchedule(field) + else: + args["backup_schedule"] = None field = data.get("read_replicas", None) if field is not None: @@ -623,6 +675,8 @@ def unmarshal_Instance(data: Any) -> Instance: field = data.get("logs_policy", None) if field is not None: args["logs_policy"] = unmarshal_LogsPolicy(field) + else: + args["logs_policy"] = None return Instance(**args) @@ -658,11 +712,11 @@ def unmarshal_SnapshotVolumeType(data: Any) -> SnapshotVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -708,22 +762,32 @@ def unmarshal_Snapshot(data: Any) -> Snapshot: field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("volume_type", None) if field is not None: args["volume_type"] = unmarshal_SnapshotVolumeType(field) + else: + args["volume_type"] = None return Snapshot(**args) @@ -778,6 +842,8 @@ def unmarshal_ACLRule(data: Any) -> ACLRule: field = data.get("port", None) if field is not None: args["port"] = field + else: + args["port"] = None return ACLRule(**args) @@ -923,26 +989,38 @@ def unmarshal_EngineSetting(data: Any) -> EngineSetting: field = data.get("unit", None) if field is not None: args["unit"] = field + else: + args["unit"] = None field = data.get("string_constraint", None) if field is not None: args["string_constraint"] = field + else: + args["string_constraint"] = None field = data.get("int_min", None) if field is not None: args["int_min"] = field + else: + args["int_min"] = None field = data.get("int_max", None) if field is not None: args["int_max"] = field + else: + args["int_max"] = None field = data.get("float_min", None) if field is not None: args["float_min"] = field + else: + args["float_min"] = None field = data.get("float_max", None) if field is not None: args["float_max"] = field + else: + args["float_max"] = None return EngineSetting(**args) @@ -988,6 +1066,8 @@ def unmarshal_EngineVersion(data: Any) -> EngineVersion: args["end_of_life"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["end_of_life"] = None return EngineVersion(**args) @@ -1194,7 +1274,7 
@@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: args: Dict[str, Any] = {} - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -1214,7 +1294,7 @@ def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: if field is not None: args["chunk_size"] = field - field = data.get("class_", None) + field = data.get("class", None) if field is not None: args["class_"] = field @@ -1260,10 +1340,14 @@ def unmarshal_NodeType(data: Any) -> NodeType: field = data.get("volume_constraint", None) if field is not None: args["volume_constraint"] = unmarshal_NodeTypeVolumeConstraintSizes(field) + else: + args["volume_constraint"] = None field = data.get("is_bssd_compatible", None) if field is not None: args["is_bssd_compatible"] = field + else: + args["is_bssd_compatible"] = None field = data.get("available_volume_types", None) if field is not None: diff --git a/scaleway/scaleway/redis/v1/marshalling.py b/scaleway/scaleway/redis/v1/marshalling.py index f72dabcb0..0db4ab1a6 100644 --- a/scaleway/scaleway/redis/v1/marshalling.py +++ b/scaleway/scaleway/redis/v1/marshalling.py @@ -64,10 +64,14 @@ def unmarshal_ACLRule(data: Any) -> ACLRule: field = data.get("ip_cidr", None) if field is not None: args["ip_cidr"] = field + else: + args["ip_cidr"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None return ACLRule(**args) @@ -133,10 +137,14 @@ def unmarshal_Endpoint(data: Any) -> Endpoint: field = data.get("private_network", None) if field is not None: args["private_network"] = unmarshal_PrivateNetwork(field) + else: + args["private_network"] = None field = data.get("public_network", None) if field is not None: args["public_network"] = unmarshal_PublicNetwork(field) + else: + args["public_network"] = None return Endpoint(**args) @@ -215,10 +223,14 @@ def unmarshal_Cluster(data: Any) -> Cluster: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("acl_rules", None) if field is not None: @@ -333,7 +345,7 @@ def unmarshal_AvailableClusterSetting(data: Any) -> AvailableClusterSetting: if field is not None: args["name"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -348,18 +360,26 @@ def unmarshal_AvailableClusterSetting(data: Any) -> AvailableClusterSetting: field = data.get("default_value", None) if field is not None: args["default_value"] = field + else: + args["default_value"] = None field = data.get("max_value", None) if field is not None: args["max_value"] = field + else: + args["max_value"] = None field = data.get("min_value", None) if field is not None: args["min_value"] = field + else: + args["min_value"] = None field = data.get("regex", None) if field is not None: args["regex"] = field + else: + args["regex"] = None return AvailableClusterSetting(**args) @@ -397,6 +417,8 @@ def unmarshal_ClusterVersion(data: Any) -> ClusterVersion: args["end_of_life_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["end_of_life_at"] = None return ClusterVersion(**args) diff --git a/scaleway/scaleway/registry/v1/marshalling.py 
b/scaleway/scaleway/registry/v1/marshalling.py index d720c1379..1ffa9c7f1 100644 --- a/scaleway/scaleway/registry/v1/marshalling.py +++ b/scaleway/scaleway/registry/v1/marshalling.py @@ -61,14 +61,20 @@ def unmarshal_Image(data: Any) -> Image: field = data.get("status_message", None) if field is not None: args["status_message"] = field + else: + args["status_message"] = None field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Image(**args) @@ -132,10 +138,14 @@ def unmarshal_Namespace(data: Any) -> Namespace: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Namespace(**args) @@ -171,10 +181,14 @@ def unmarshal_Tag(data: Any) -> Tag: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return Tag(**args) diff --git a/scaleway/scaleway/secret/v1alpha1/marshalling.py b/scaleway/scaleway/secret/v1alpha1/marshalling.py index 6c6efd10f..34a6622c3 100644 --- a/scaleway/scaleway/secret/v1alpha1/marshalling.py +++ b/scaleway/scaleway/secret/v1alpha1/marshalling.py @@ -58,6 +58,8 @@ def unmarshal_Folder(data: Any) -> Folder: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None return Folder(**args) @@ -77,10 +79,14 @@ def unmarshal_EphemeralProperties(data: Any) -> EphemeralProperties: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralProperties(**args) @@ -112,18 +118,26 @@ def unmarshal_SecretVersion(data: Any) -> SecretVersion: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_properties", None) if field is not None: args["ephemeral_properties"] = unmarshal_EphemeralProperties(field) + else: + args["ephemeral_properties"] = None return SecretVersion(**args) @@ -143,10 +157,14 @@ def unmarshal_EphemeralPolicy(data: Any) -> EphemeralPolicy: field = data.get("time_to_live", None) if field is not None: args["time_to_live"] = field + else: + 
args["time_to_live"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralPolicy(**args) @@ -178,10 +196,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("tags", None) if field is not None: @@ -199,7 +221,7 @@ def unmarshal_Secret(data: Any) -> Secret: if field is not None: args["is_protected"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -214,10 +236,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_policy", None) if field is not None: args["ephemeral_policy"] = unmarshal_EphemeralPolicy(field) + else: + args["ephemeral_policy"] = None return Secret(**args) @@ -245,6 +271,8 @@ def unmarshal_AccessSecretVersionResponse(data: Any) -> AccessSecretVersionRespo field = data.get("data_crc32", None) if field is not None: args["data_crc32"] = field + else: + args["data_crc32"] = None return AccessSecretVersionResponse(**args) diff --git a/scaleway/scaleway/secret/v1beta1/marshalling.py b/scaleway/scaleway/secret/v1beta1/marshalling.py index 3dfc237a4..a2bb8fb59 100644 --- a/scaleway/scaleway/secret/v1beta1/marshalling.py +++ b/scaleway/scaleway/secret/v1beta1/marshalling.py @@ -41,10 +41,14 @@ def unmarshal_EphemeralProperties(data: Any) -> EphemeralProperties: field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralProperties(**args) @@ -76,18 +80,26 @@ def unmarshal_SecretVersion(data: Any) -> SecretVersion: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_properties", None) if field is not None: args["ephemeral_properties"] = unmarshal_EphemeralProperties(field) + else: + args["ephemeral_properties"] = None return SecretVersion(**args) @@ -107,10 +119,14 @@ def unmarshal_EphemeralPolicy(data: Any) -> EphemeralPolicy: field = data.get("time_to_live", None) if field is not None: args["time_to_live"] = field + else: + args["time_to_live"] = None field = data.get("expires_once_accessed", None) if field is not None: args["expires_once_accessed"] = field + else: + args["expires_once_accessed"] = None return EphemeralPolicy(**args) @@ -142,10 +158,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = data.get("created_at", None) if field is not None: 
args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("tags", None) if field is not None: @@ -163,7 +183,7 @@ def unmarshal_Secret(data: Any) -> Secret: if field is not None: args["protected"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -178,10 +198,14 @@ def unmarshal_Secret(data: Any) -> Secret: field = data.get("description", None) if field is not None: args["description"] = field + else: + args["description"] = None field = data.get("ephemeral_policy", None) if field is not None: args["ephemeral_policy"] = unmarshal_EphemeralPolicy(field) + else: + args["ephemeral_policy"] = None return Secret(**args) @@ -206,13 +230,15 @@ def unmarshal_AccessSecretVersionResponse(data: Any) -> AccessSecretVersionRespo if field is not None: args["data"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field field = data.get("data_crc32", None) if field is not None: args["data_crc32"] = field + else: + args["data_crc32"] = None return AccessSecretVersionResponse(**args) @@ -259,6 +285,8 @@ def unmarshal_BrowseSecretsResponseItemSecretDetails( field = data.get("ephemeral_policy", None) if field is not None: args["ephemeral_policy"] = unmarshal_EphemeralPolicy(field) + else: + args["ephemeral_policy"] = None return BrowseSecretsResponseItemSecretDetails(**args) @@ -278,18 +306,26 @@ def unmarshal_BrowseSecretsResponseItem(data: Any) -> BrowseSecretsResponseItem: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("secret", None) if field is not None: args["secret"] = unmarshal_BrowseSecretsResponseItemSecretDetails(field) + else: + args["secret"] = None field = data.get("folder", None) if field is not None: args["folder"] = unmarshal_BrowseSecretsResponseItemFolderDetails(field) + else: + args["folder"] = None return BrowseSecretsResponseItem(**args) diff --git a/scaleway/scaleway/serverless_sqldb/v1alpha1/marshalling.py b/scaleway/scaleway/serverless_sqldb/v1alpha1/marshalling.py index 88f441a68..f250a5316 100644 --- a/scaleway/scaleway/serverless_sqldb/v1alpha1/marshalling.py +++ b/scaleway/scaleway/serverless_sqldb/v1alpha1/marshalling.py @@ -51,24 +51,34 @@ def unmarshal_DatabaseBackup(data: Any) -> DatabaseBackup: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("expires_at", None) if field is not None: args["expires_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["expires_at"] = None field = data.get("size", None) if field is not None: args["size"] = field + else: + args["size"] = None field = data.get("download_url", None) if field is not None: args["download_url"] = field + else: + args["download_url"] = None field = data.get("download_url_expires_at", None) if field is not None: args["download_url_expires_at"] = ( 
             parser.isoparse(field) if isinstance(field, str) else field
         )
+    else:
+        args["download_url_expires_at"] = None

     return DatabaseBackup(**args)
@@ -132,6 +142,8 @@ def unmarshal_Database(data: Any) -> Database:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     return Database(**args)
diff --git a/scaleway/scaleway/tem/v1alpha1/marshalling.py b/scaleway/scaleway/tem/v1alpha1/marshalling.py
index 5d15e237b..f35ef7308 100644
--- a/scaleway/scaleway/tem/v1alpha1/marshalling.py
+++ b/scaleway/scaleway/tem/v1alpha1/marshalling.py
@@ -53,6 +53,8 @@ def unmarshal_EmailTry(data: Any) -> EmailTry:
     field = data.get("tried_at", None)
     if field is not None:
         args["tried_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["tried_at"] = None

     return EmailTry(**args)
@@ -88,6 +90,8 @@ def unmarshal_Email(data: Any) -> Email:
     field = data.get("rcpt_to", None)
     if field is not None:
         args["rcpt_to"] = field
+    else:
+        args["rcpt_to"] = None

     field = data.get("rcpt_type", None)
     if field is not None:
@@ -118,14 +122,20 @@ def unmarshal_Email(data: Any) -> Email:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("updated_at", None)
     if field is not None:
         args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["updated_at"] = None

     field = data.get("status_details", None)
     if field is not None:
         args["status_details"] = field
+    else:
+        args["status_details"] = None

     return Email(**args)
@@ -160,6 +170,8 @@ def unmarshal_DomainRecords(data: Any) -> DomainRecords:
     field = data.get("dmarc", None)
     if field is not None:
         args["dmarc"] = unmarshal_DomainRecordsDMARC(field)
+    else:
+        args["dmarc"] = None

     return DomainRecords(**args)
@@ -183,16 +195,22 @@ def unmarshal_DomainReputation(data: Any) -> DomainReputation:
     field = data.get("scored_at", None)
     if field is not None:
         args["scored_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["scored_at"] = None

     field = data.get("previous_score", None)
     if field is not None:
         args["previous_score"] = field
+    else:
+        args["previous_score"] = None

     field = data.get("previous_scored_at", None)
     if field is not None:
         args["previous_scored_at"] = (
             parser.isoparse(field) if isinstance(field, str) else field
         )
+    else:
+        args["previous_scored_at"] = None

     return DomainReputation(**args)
@@ -259,18 +277,24 @@ def unmarshal_Domain(data: Any) -> Domain:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("next_check_at", None)
     if field is not None:
         args["next_check_at"] = (
             parser.isoparse(field) if isinstance(field, str) else field
         )
+    else:
+        args["next_check_at"] = None

     field = data.get("last_valid_at", None)
     if field is not None:
         args["last_valid_at"] = (
             parser.isoparse(field) if isinstance(field, str) else field
         )
+    else:
+        args["last_valid_at"] = None

     field = data.get("dkim_config", None)
     if field is not None:
@@ -283,22 +307,32 @@ def unmarshal_Domain(data: Any) -> Domain:
     field = data.get("revoked_at", None)
     if field is not None:
         args["revoked_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["revoked_at"] = None

     field = data.get("last_error", None)
     if field is not None:
not None: args["last_error"] = field + else: + args["last_error"] = None field = data.get("statistics", None) if field is not None: args["statistics"] = unmarshal_DomainStatistics(field) + else: + args["statistics"] = None field = data.get("reputation", None) if field is not None: args["reputation"] = unmarshal_DomainReputation(field) + else: + args["reputation"] = None field = data.get("records", None) if field is not None: args["records"] = unmarshal_DomainRecords(field) + else: + args["records"] = None return Domain(**args) @@ -337,10 +371,14 @@ def unmarshal_DomainLastStatusDkimRecord(data: Any) -> DomainLastStatusDkimRecor args["last_valid_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_valid_at"] = None field = data.get("error", None) if field is not None: args["error"] = field + else: + args["error"] = None return DomainLastStatusDkimRecord(**args) @@ -362,10 +400,14 @@ def unmarshal_DomainLastStatusDmarcRecord(data: Any) -> DomainLastStatusDmarcRec args["last_valid_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_valid_at"] = None field = data.get("error", None) if field is not None: args["error"] = field + else: + args["error"] = None return DomainLastStatusDmarcRecord(**args) @@ -387,10 +429,14 @@ def unmarshal_DomainLastStatusSpfRecord(data: Any) -> DomainLastStatusSpfRecord: args["last_valid_at"] = ( parser.isoparse(field) if isinstance(field, str) else field ) + else: + args["last_valid_at"] = None field = data.get("error", None) if field is not None: args["error"] = field + else: + args["error"] = None return DomainLastStatusSpfRecord(**args) @@ -414,14 +460,20 @@ def unmarshal_DomainLastStatus(data: Any) -> DomainLastStatus: field = data.get("spf_record", None) if field is not None: args["spf_record"] = unmarshal_DomainLastStatusSpfRecord(field) + else: + args["spf_record"] = None field = data.get("dkim_record", None) if field is not None: args["dkim_record"] = unmarshal_DomainLastStatusDkimRecord(field) + else: + args["dkim_record"] = None field = data.get("dmarc_record", None) if field is not None: args["dmarc_record"] = unmarshal_DomainLastStatusDmarcRecord(field) + else: + args["dmarc_record"] = None return DomainLastStatus(**args) diff --git a/scaleway/scaleway/test/v1/marshalling.py b/scaleway/scaleway/test/v1/marshalling.py index 357241bf4..6c3c3e84c 100644 --- a/scaleway/scaleway/test/v1/marshalling.py +++ b/scaleway/scaleway/test/v1/marshalling.py @@ -58,10 +58,14 @@ def unmarshal_Human(data: Any) -> Human: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("hair_count", None) if field is not None: diff --git a/scaleway/scaleway/vpc/v1/marshalling.py b/scaleway/scaleway/vpc/v1/marshalling.py index 32514e39b..b67881a31 100644 --- a/scaleway/scaleway/vpc/v1/marshalling.py +++ b/scaleway/scaleway/vpc/v1/marshalling.py @@ -52,10 +52,14 @@ def unmarshal_PrivateNetwork(data: Any) -> PrivateNetwork: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if 
+    else:
+        args["updated_at"] = None

     return PrivateNetwork(**args)
diff --git a/scaleway/scaleway/vpc/v2/marshalling.py b/scaleway/scaleway/vpc/v2/marshalling.py
index 760ec6923..fbbc5561f 100644
--- a/scaleway/scaleway/vpc/v2/marshalling.py
+++ b/scaleway/scaleway/vpc/v2/marshalling.py
@@ -48,10 +48,14 @@ def unmarshal_Subnet(data: Any) -> Subnet:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("updated_at", None)
     if field is not None:
         args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["updated_at"] = None

     return Subnet(**args)
@@ -105,10 +109,14 @@ def unmarshal_PrivateNetwork(data: Any) -> PrivateNetwork:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("updated_at", None)
     if field is not None:
         args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["updated_at"] = None

     return PrivateNetwork(**args)
@@ -160,10 +168,14 @@ def unmarshal_VPC(data: Any) -> VPC:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("updated_at", None)
     if field is not None:
         args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["updated_at"] = None

     return VPC(**args)
diff --git a/scaleway/scaleway/vpcgw/v1/marshalling.py b/scaleway/scaleway/vpcgw/v1/marshalling.py
index 80c04c1c8..a30639886 100644
--- a/scaleway/scaleway/vpcgw/v1/marshalling.py
+++ b/scaleway/scaleway/vpcgw/v1/marshalling.py
@@ -83,10 +83,14 @@ def unmarshal_DHCP(data: Any) -> DHCP:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("updated_at", None)
     if field is not None:
         args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["updated_at"] = None

     field = data.get("pool_high", None)
     if field is not None:
@@ -123,14 +127,20 @@ def unmarshal_DHCP(data: Any) -> DHCP:
     field = data.get("valid_lifetime", None)
     if field is not None:
         args["valid_lifetime"] = field
+    else:
+        args["valid_lifetime"] = None

     field = data.get("renew_timer", None)
     if field is not None:
         args["renew_timer"] = field
+    else:
+        args["renew_timer"] = None

     field = data.get("rebind_timer", None)
     if field is not None:
         args["rebind_timer"] = field
+    else:
+        args["rebind_timer"] = None

     return DHCP(**args)
@@ -177,14 +187,20 @@ def unmarshal_GatewayNetwork(data: Any) -> GatewayNetwork:
     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("updated_at", None)
     if field is not None:
         args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["updated_at"] = None

     field = data.get("mac_address", None)
     if field is not None:
         args["mac_address"] = field
+    else:
+        args["mac_address"] = None

     field = data.get("enable_masquerade", None)
     if field is not None:
@@ -205,14 +221,20 @@ def unmarshal_GatewayNetwork(data: Any) -> GatewayNetwork:
     field = data.get("dhcp", None)
if field is not None: args["dhcp"] = unmarshal_DHCP(field) + else: + args["dhcp"] = None field = data.get("address", None) if field is not None: args["address"] = field + else: + args["address"] = None field = data.get("ipam_config", None) if field is not None: args["ipam_config"] = unmarshal_IpamConfig(field) + else: + args["ipam_config"] = None return GatewayNetwork(**args) @@ -252,18 +274,26 @@ def unmarshal_IP(data: Any) -> IP: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None field = data.get("reverse", None) if field is not None: args["reverse"] = field + else: + args["reverse"] = None field = data.get("gateway_id", None) if field is not None: args["gateway_id"] = field + else: + args["gateway_id"] = None return IP(**args) @@ -296,7 +326,7 @@ def unmarshal_DHCPEntry(data: Any) -> DHCPEntry: if field is not None: args["hostname"] = field - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = field @@ -307,10 +337,14 @@ def unmarshal_DHCPEntry(data: Any) -> DHCPEntry: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return DHCPEntry(**args) @@ -361,14 +395,20 @@ def unmarshal_Gateway(data: Any) -> Gateway: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None - field = data.get("type_", None) + field = data.get("type", None) if field is not None: args["type_"] = unmarshal_GatewayType(field) + else: + args["type_"] = None field = data.get("status", None) if field is not None: @@ -399,14 +439,20 @@ def unmarshal_Gateway(data: Any) -> Gateway: field = data.get("ip", None) if field is not None: args["ip"] = unmarshal_IP(field) + else: + args["ip"] = None field = data.get("version", None) if field is not None: args["version"] = field + else: + args["version"] = None field = data.get("can_upgrade_to", None) if field is not None: args["can_upgrade_to"] = field + else: + args["can_upgrade_to"] = None field = data.get("bastion_port", None) if field is not None: @@ -470,10 +516,14 @@ def unmarshal_PATRule(data: Any) -> PATRule: field = data.get("created_at", None) if field is not None: args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None field = data.get("updated_at", None) if field is not None: args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None return PATRule(**args) diff --git a/scaleway/scaleway/webhosting/v1alpha1/marshalling.py b/scaleway/scaleway/webhosting/v1alpha1/marshalling.py index e6f39d9a3..7329b7b2e 100644 --- a/scaleway/scaleway/webhosting/v1alpha1/marshalling.py +++ 
@@ -113,14 +113,20 @@ def unmarshal_Hosting(data: Any) -> Hosting:
     field = data.get("updated_at", None)
     if field is not None:
         args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["updated_at"] = None

     field = data.get("created_at", None)
     if field is not None:
         args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field
+    else:
+        args["created_at"] = None

     field = data.get("platform_number", None)
     if field is not None:
         args["platform_number"] = field
+    else:
+        args["platform_number"] = None

     field = data.get("options", None)
     if field is not None:
@@ -171,6 +177,8 @@ def unmarshal_Hosting(data: Any) -> Hosting:
     field = data.get("cpanel_urls", None)
     if field is not None:
         args["cpanel_urls"] = unmarshal_HostingCpanelUrls(field)
+    else:
+        args["cpanel_urls"] = None

     return Hosting(**args)
@@ -187,7 +195,7 @@ def unmarshal_DnsRecord(data: Any) -> DnsRecord:
     if field is not None:
         args["name"] = field

-    field = data.get("type_", None)
+    field = data.get("type", None)
     if field is not None:
         args["type_"] = field

@@ -206,6 +214,8 @@ def unmarshal_DnsRecord(data: Any) -> DnsRecord:
     field = data.get("priority", None)
     if field is not None:
         args["priority"] = field
+    else:
+        args["priority"] = None

     return DnsRecord(**args)
@@ -413,10 +423,14 @@ def unmarshal_Offer(data: Any) -> Offer:
     field = data.get("product", None)
     if field is not None:
         args["product"] = unmarshal_OfferProduct(field)
+    else:
+        args["product"] = None

     field = data.get("price", None)
     if field is not None:
         args["price"] = unmarshal_Money(field)
+    else:
+        args["price"] = None

     return Offer(**args)
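Every hunk above applies the same generated pattern to the unmarshal helpers: read the wire-format key (the reserved words "type" and "class" on the wire map to the Python-safe constructor arguments "type_" and "class_"), and assign an explicit None when an optional field is absent instead of omitting the argument. A minimal sketch of that shape, using a hypothetical Thing dataclass rather than any real scaleway type:

    from dataclasses import dataclass
    from typing import Any, Dict, Optional

    @dataclass
    class Thing:
        type_: Optional[str]
        created_at: Optional[str]

    def unmarshal_Thing(data: Any) -> Thing:
        args: Dict[str, Any] = {}

        # The wire key is "type"; the constructor argument is the Python-safe "type_".
        field = data.get("type", None)
        if field is not None:
            args["type_"] = field
        else:
            args["type_"] = None

        # Optional fields are set to an explicit None when missing from the payload.
        field = data.get("created_at", None)
        if field is not None:
            args["created_at"] = field
        else:
            args["created_at"] = None

        return Thing(**args)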